From 66dc1887ed21b4035069a660e1645c9bcac37880 Mon Sep 17 00:00:00 2001
From: Adam Gemmell <adam.gemmell@arm.com>
Date: Wed, 25 Oct 2023 14:04:37 +0100
Subject: [PATCH 1/6] Add SVE support to stdarch-verify
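
Teach stdarch-verify about the SVE and SVE2 ACLE intrinsics:

- Accept safe, public `sv`-prefixed functions, since many SVE
  intrinsics are not `unsafe`.
- When an intrinsic has const generics but neither the
  `rustc_args_required_const` nor the `rustc_legacy_const_generics`
  attribute, assume the const arguments come at the end of the
  argument list.
- Add the SVE vector, predicate and enum types to the type tables and
  to the signature matcher, including west-const pointers, `void`,
  `bool` and generic `T` pointees.
- Strip the `[...]` markers from ACLE names (e.g. `svaba[_n_s16]`
  becomes `svaba_n_s16`) and accept entries without an `instructions`
  list.
- Vendor the SVE/SVE2 definitions into arm_intrinsics.json, adding
  `element_bit_size` to the existing return types.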

Co-authored-by: Jamie Cunliffe <Jamie.Cunliffe@arm.com>
Co-authored-by: Jacob Bramley <jacob.bramley@arm.com>
Co-authored-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
---
 crates/stdarch-verify/src/lib.rs    |     92 +-
 crates/stdarch-verify/tests/arm.rs  |    184 +-
 intrinsics_data/arm_intrinsics.json | 190680 ++++++++++++++++++++++++-
 vendor.yml                          |      3 +-
 4 files changed, 190893 insertions(+), 66 deletions(-)

diff --git a/crates/stdarch-verify/src/lib.rs b/crates/stdarch-verify/src/lib.rs
index 1eb939abcd..6f3a2c5a7a 100644
--- a/crates/stdarch-verify/src/lib.rs
+++ b/crates/stdarch-verify/src/lib.rs
@@ -45,7 +45,9 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream {
     for &mut (ref mut file, ref path) in &mut files {
         for mut item in file.items.drain(..) {
             match item {
-                syn::Item::Fn(f) => functions.push((f, path)),
+                syn::Item::Fn(f) => {
+                    functions.push((f, path));
+                }
                 syn::Item::Mod(ref mut m) => {
                     if let Some(ref mut m) = m.content {
                         for i in m.1.drain(..) {
@@ -71,12 +73,9 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream {
     assert!(!tests.is_empty());
 
     functions.retain(|(f, _)| {
-        if let syn::Visibility::Public(_) = f.vis {
-            if f.sig.unsafety.is_some() {
-                return true;
-            }
-        }
-        false
+        matches!(f.vis, syn::Visibility::Public(_))
+            // Many SVE intrinsics are safe
+            && (f.sig.unsafety.is_some() || f.sig.ident.to_string().starts_with("sv"))
     });
     assert!(!functions.is_empty());
 
@@ -97,11 +96,11 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream {
                 arguments.push(to_type(ty));
             }
             for generic in f.sig.generics.params.iter() {
-                let ty = match *generic {
-                    syn::GenericParam::Const(ref c) => &c.ty,
+                match *generic {
+                    syn::GenericParam::Const(ref c) => const_arguments.push(to_type(&c.ty)),
+                    syn::GenericParam::Type(_) => (),
                     _ => panic!("invalid generic argument on {name}"),
                 };
-                const_arguments.push(to_type(ty));
             }
             let ret = match f.sig.output {
                 syn::ReturnType::Default => quote! { None },
@@ -118,25 +117,31 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream {
             };
 
             let required_const = find_required_const("rustc_args_required_const", &f.attrs);
-            let mut legacy_const_generics =
+            let mut const_generics_indices =
                 find_required_const("rustc_legacy_const_generics", &f.attrs);
-            if !required_const.is_empty() && !legacy_const_generics.is_empty() {
+            if !required_const.is_empty() && !const_generics_indices.is_empty() {
                 panic!(
                     "Can't have both #[rustc_args_required_const] and \
                      #[rustc_legacy_const_generics]"
                 );
             }
 
+            // Newer intrinsics don't have the legacy const attributes - assume their const generics come at the end of the argument list
+            if required_const.is_empty() && const_generics_indices.is_empty() {
+                const_generics_indices =
+                    (arguments.len()..(arguments.len() + const_arguments.len())).collect();
+            }
+
             // The list of required consts, used to verify the arguments, comes from either the
             // `rustc_args_required_const` or the `rustc_legacy_const_generics` attribute.
             let required_const = if required_const.is_empty() {
-                legacy_const_generics.clone()
+                const_generics_indices.clone()
             } else {
                 required_const
             };
 
-            legacy_const_generics.sort();
-            for (idx, ty) in legacy_const_generics
+            const_generics_indices.sort();
+            for (idx, ty) in const_generics_indices
                 .into_iter()
                 .zip(const_arguments.into_iter())
             {
@@ -145,12 +150,12 @@ fn functions(input: TokenStream, dirs: &[&str]) -> TokenStream {
 
             // strip leading underscore from fn name when building a test
             // _mm_foo -> mm_foo such that the test name is test_mm_foo.
-            let test_name_string = format!("{name}");
-            let mut test_name_id = test_name_string.as_str();
-            while test_name_id.starts_with('_') {
-                test_name_id = &test_name_id[1..];
-            }
-            let has_test = tests.contains(&format!("test_{test_name_id}"));
+            let test_name = name.to_string();
+            let test_name_id = test_name.trim_start_matches('_');
+            let has_test = tests.contains(&format!("test_{test_name_id}"))
+                // Generated SVE load/store tests share a prefix or suffix with the intrinsic name
+                || tests.iter().any(|t| t.starts_with(&format!("test_{test_name_id}"))
+                                        || t.ends_with(&format!("_with_{test_name_id}")));
 
             quote! {
                 Function {
@@ -213,8 +218,53 @@ fn to_type(t: &syn::Type) -> proc_macro2::TokenStream {
             "p16" => quote! { &P16 },
             "Ordering" => quote! { &ORDERING },
             "CpuidResult" => quote! { &CPUID },
+            "T" => quote! { &GENERICT },
 
             // arm ...
+            "svbool_t" => quote! { &SVBOOL },
+            "svint8_t" => quote! { &SVI8 },
+            "svint8x2_t" => quote! { &SVI8X2 },
+            "svint8x3_t" => quote! { &SVI8X3 },
+            "svint8x4_t" => quote! { &SVI8X4 },
+            "svint16_t" => quote! { &SVI16 },
+            "svint16x2_t" => quote! { &SVI16X2 },
+            "svint16x3_t" => quote! { &SVI16X3 },
+            "svint16x4_t" => quote! { &SVI16X4 },
+            "svint32_t" => quote! { &SVI32 },
+            "svint32x2_t" => quote! { &SVI32X2 },
+            "svint32x3_t" => quote! { &SVI32X3 },
+            "svint32x4_t" => quote! { &SVI32X4 },
+            "svint64_t" => quote! { &SVI64 },
+            "svint64x2_t" => quote! { &SVI64X2 },
+            "svint64x3_t" => quote! { &SVI64X3 },
+            "svint64x4_t" => quote! { &SVI64X4 },
+            "svuint8_t" => quote! { &SVU8 },
+            "svuint8x2_t" => quote! { &SVU8X2 },
+            "svuint8x3_t" => quote! { &SVU8X3 },
+            "svuint8x4_t" => quote! { &SVU8X4 },
+            "svuint16_t" => quote! { &SVU16 },
+            "svuint16x2_t" => quote! { &SVU16X2 },
+            "svuint16x3_t" => quote! { &SVU16X3 },
+            "svuint16x4_t" => quote! { &SVU16X4 },
+            "svuint32_t" => quote! { &SVU32 },
+            "svuint32x2_t" => quote! { &SVU32X2 },
+            "svuint32x3_t" => quote! { &SVU32X3 },
+            "svuint32x4_t" => quote! { &SVU32X4 },
+            "svuint64_t" => quote! { &SVU64 },
+            "svuint64x2_t" => quote! { &SVU64X2 },
+            "svuint64x3_t" => quote! { &SVU64X3 },
+            "svuint64x4_t" => quote! { &SVU64X4 },
+            "svfloat32_t" => quote! { &SVF32 },
+            "svfloat32x2_t" => quote! { &SVF32X2 },
+            "svfloat32x3_t" => quote! { &SVF32X3 },
+            "svfloat32x4_t" => quote! { &SVF32X4 },
+            "svfloat64_t" => quote! { &SVF64 },
+            "svfloat64x2_t" => quote! { &SVF64X2 },
+            "svfloat64x3_t" => quote! { &SVF64X3 },
+            "svfloat64x4_t" => quote! { &SVF64X4 },
+            "svprfop" => quote! { &SVPRFOP },
+            "svpattern" => quote! { &SVPATTERN },
+
             "int8x4_t" => quote! { &I8X4 },
             "int8x8_t" => quote! { &I8X8 },
             "int8x8x2_t" => quote! { &I8X8X2 },
diff --git a/crates/stdarch-verify/tests/arm.rs b/crates/stdarch-verify/tests/arm.rs
index d6f8e69acb..cb3a2e6355 100644
--- a/crates/stdarch-verify/tests/arm.rs
+++ b/crates/stdarch-verify/tests/arm.rs
@@ -26,7 +26,10 @@ static U16: Type = Type::PrimUnsigned(16);
 static U32: Type = Type::PrimUnsigned(32);
 static U64: Type = Type::PrimUnsigned(64);
 static U8: Type = Type::PrimUnsigned(8);
+static BOOL: Type = Type::PrimBool;
+static VOID: Type = Type::Void;
 static NEVER: Type = Type::Never;
+static GENERICT: Type = Type::GenericParam("T");
 
 static F16X4: Type = Type::F(16, 4, 1);
 static F16X4X2: Type = Type::F(16, 4, 2);
@@ -148,18 +151,70 @@ static U8X8X2: Type = Type::U(8, 8, 2);
 static U8X8X3: Type = Type::U(8, 8, 3);
 static U8X8X4: Type = Type::U(8, 8, 4);
 
+static SVBOOL: Type = Type::Pred;
+static SVF32: Type = Type::SVF(32, 1);
+static SVF32X2: Type = Type::SVF(32, 2);
+static SVF32X3: Type = Type::SVF(32, 3);
+static SVF32X4: Type = Type::SVF(32, 4);
+static SVF64: Type = Type::SVF(64, 1);
+static SVF64X2: Type = Type::SVF(64, 2);
+static SVF64X3: Type = Type::SVF(64, 3);
+static SVF64X4: Type = Type::SVF(64, 4);
+static SVI8: Type = Type::SVI(8, 1);
+static SVI8X2: Type = Type::SVI(8, 2);
+static SVI8X3: Type = Type::SVI(8, 3);
+static SVI8X4: Type = Type::SVI(8, 4);
+static SVI16: Type = Type::SVI(16, 1);
+static SVI16X2: Type = Type::SVI(16, 2);
+static SVI16X3: Type = Type::SVI(16, 3);
+static SVI16X4: Type = Type::SVI(16, 4);
+static SVI32: Type = Type::SVI(32, 1);
+static SVI32X2: Type = Type::SVI(32, 2);
+static SVI32X3: Type = Type::SVI(32, 3);
+static SVI32X4: Type = Type::SVI(32, 4);
+static SVI64: Type = Type::SVI(64, 1);
+static SVI64X2: Type = Type::SVI(64, 2);
+static SVI64X3: Type = Type::SVI(64, 3);
+static SVI64X4: Type = Type::SVI(64, 4);
+static SVU8: Type = Type::SVU(8, 1);
+static SVU8X2: Type = Type::SVU(8, 2);
+static SVU8X3: Type = Type::SVU(8, 3);
+static SVU8X4: Type = Type::SVU(8, 4);
+static SVU16: Type = Type::SVU(16, 1);
+static SVU16X2: Type = Type::SVU(16, 2);
+static SVU16X3: Type = Type::SVU(16, 3);
+static SVU16X4: Type = Type::SVU(16, 4);
+static SVU32: Type = Type::SVU(32, 1);
+static SVU32X2: Type = Type::SVU(32, 2);
+static SVU32X3: Type = Type::SVU(32, 3);
+static SVU32X4: Type = Type::SVU(32, 4);
+static SVU64: Type = Type::SVU(64, 1);
+static SVU64X2: Type = Type::SVU(64, 2);
+static SVU64X3: Type = Type::SVU(64, 3);
+static SVU64X4: Type = Type::SVU(64, 4);
+static SVPRFOP: Type = Type::Enum("svprfop");
+static SVPATTERN: Type = Type::Enum("svpattern");
+
 #[derive(Debug, Copy, Clone, PartialEq)]
 enum Type {
+    Void,
+    PrimBool,
     PrimFloat(u8),
     PrimSigned(u8),
     PrimUnsigned(u8),
     PrimPoly(u8),
     MutPtr(&'static Type),
     ConstPtr(&'static Type),
+    Enum(&'static str),
+    GenericParam(&'static str),
     I(u8, u8, u8),
     U(u8, u8, u8),
     P(u8, u8, u8),
     F(u8, u8, u8),
+    Pred,
+    SVI(u8, u8),
+    SVU(u8, u8),
+    SVF(u8, u8),
     Never,
 }
 
@@ -178,6 +233,7 @@ fn verify_all_signatures() {
 
     let mut all_valid = true;
     for rust in FUNCTIONS {
+        // Most SVE intrinsics just rely on the intrinsic-test tool for validation
         if !rust.has_test {
             let skip = [
                 "vaddq_s64",
@@ -403,18 +459,16 @@ fn verify_all_signatures() {
                 "__clrex",
                 "__dbg",
             ];
-            if !skip.contains(&rust.name) {
-                println!(
-                    "missing run-time test named `test_{}` for `{}`",
-                    {
-                        let mut id = rust.name;
-                        while id.starts_with('_') {
-                            id = &id[1..];
-                        }
-                        id
-                    },
-                    rust.name
-                );
+            if !skip.contains(&rust.name)
+                // Most run-time tests are handled by the intrinsic-test tool, except for
+                // load/stores (which have generated tests)
+                && (!rust.name.starts_with("sv") || rust.name.starts_with("svld")
+                    || rust.name.starts_with("svst"))
+                // The load/store test generator can't handle these cases yet
+                && (!rust.name.contains("_u32base_") || rust.name.contains("index") || rust.name.contains("offset"))
+                && !(rust.name.starts_with("svldff1") && rust.name.contains("gather"))
+            {
+                println!("missing run-time test for `{}`", rust.name);
                 all_valid = false;
             }
         }
@@ -487,12 +541,21 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> {
     let mut nconst = 0;
     let iter = rust.arguments.iter().zip(&arm.arguments).enumerate();
     for (i, (rust_ty, (arm, arm_const))) in iter {
-        if *rust_ty != arm {
-            bail!("mismatched arguments: {rust_ty:?} != {arm:?}")
+        match (*rust_ty, arm) {
+            // SVE uses generic type parameters to handle void pointers
+            (Type::ConstPtr(Type::GenericParam("T")), Type::ConstPtr(Type::Void)) => (),
+            // SVE const generics use i32 over u64 for usability reasons
+            (Type::PrimSigned(32), Type::PrimUnsigned(64)) if rust.required_const.contains(&i) => {
+                ()
+            }
+            // svset's const argument is not last, contrary to the assumption made when building the Function
+            _ if rust.name.starts_with("svset") => (),
+            (x, y) if x == y => (),
+            _ => bail!("mismatched arguments: {rust_ty:?} != {arm:?}"),
         }
         if *arm_const {
             nconst += 1;
-            if !rust.required_const.contains(&i) {
+            if !rust.required_const.contains(&i) && !rust.name.starts_with("svset") {
                 bail!("argument const mismatch");
             }
         }
@@ -501,7 +564,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> {
         bail!("wrong number of const arguments");
     }
 
-    if rust.instrs.is_empty() {
+    if rust.instrs.is_empty() && !arm.instruction.is_empty() {
         bail!(
             "instruction not listed for `{}`, but arm lists {:?}",
             rust.name,
@@ -540,7 +603,7 @@ fn matches(rust: &Function, arm: &Intrinsic) -> Result<(), String> {
     Ok(())
 }
 
-#[derive(PartialEq)]
+#[derive(Debug, PartialEq)]
 struct Intrinsic {
     name: String,
     ret: Option<Type>,
@@ -555,7 +618,7 @@ struct JsonIntrinsic {
     arguments: Vec<String>,
     return_type: ReturnType,
     #[serde(default)]
-    instructions: Vec<Vec<String>>,
+    instructions: Option<Vec<Vec<String>>>,
 }
 
 #[derive(Deserialize, Debug)]
@@ -572,8 +635,8 @@ fn parse_intrinsics(intrinsics: Vec<JsonIntrinsic>) -> HashMap<String, Intrinsic
     ret
 }
 
-fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic {
-    let name = intr.name;
+fn parse_intrinsic(intr: JsonIntrinsic) -> Intrinsic {
+    let name = intr.name.replace('[', "").replace(']', "");
     let ret = if intr.return_type.value == "void" {
         None
     } else {
@@ -582,18 +645,24 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic {
 
     // This ignores multiple instructions and different optional sequences for now to mimic
     // the old HTML scraping behaviour
-    let instruction = intr.instructions.swap_remove(0).swap_remove(0);
+    let instruction = intr
+        .instructions
+        .map_or(String::new(), |mut i| i.swap_remove(0).swap_remove(0));
 
     let arguments = intr
         .arguments
         .iter()
         .map(|s| {
-            let (ty, konst) = match s.strip_prefix("const") {
-                Some(stripped) => (stripped.trim_start(), true),
-                None => (s.as_str(), false),
+            let ty = if let Some(i) = s.find('*') {
+                &s[..i + 1]
+            } else {
+                s.rsplit_once(' ').unwrap().0.trim_start_matches("const ")
             };
-            let ty = ty.rsplit_once(' ').unwrap().0;
-            (parse_ty(ty), konst)
+            let ty = parse_ty(ty);
+            let konst = s.contains("const") && !matches!(ty, Type::ConstPtr(_))
+                || s.starts_with("enum")
+                || s.rsplit_once(" ").unwrap().1.starts_with("imm");
+            (ty, konst)
         })
         .collect::<Vec<_>>();
 
@@ -606,18 +675,26 @@ fn parse_intrinsic(mut intr: JsonIntrinsic) -> Intrinsic {
 }
 
 fn parse_ty(s: &str) -> Type {
-    let suffix = " const *";
-    if let Some(base) = s.strip_suffix(suffix) {
-        Type::ConstPtr(parse_ty_base(base))
-    } else if let Some(base) = s.strip_suffix(" *") {
-        Type::MutPtr(parse_ty_base(base))
+    if let Some(ty) = s.strip_suffix("*") {
+        let ty = ty.trim();
+        if let Some(ty) = ty.strip_prefix("const") {
+            // SVE intrinsics are west-const (const int8_t *)
+            Type::ConstPtr(parse_ty_base(ty))
+        } else if let Some(ty) = ty.strip_suffix("const") {
+            // Neon intrinsics are east-const (int8_t const *)
+            Type::ConstPtr(parse_ty_base(ty))
+        } else {
+            Type::MutPtr(parse_ty_base(ty))
+        }
     } else {
         *parse_ty_base(s)
     }
 }
 
 fn parse_ty_base(s: &str) -> &'static Type {
-    match s {
+    match s.trim() {
+        "bool" => &BOOL,
+        "void" => &VOID,
         "float16_t" => &F16,
         "float16x4_t" => &F16X4,
         "float16x4x2_t" => &F16X4X2,
@@ -747,6 +824,49 @@ fn parse_ty_base(s: &str) -> &'static Type {
         "uint8x8x2_t" => &U8X8X2,
         "uint8x8x3_t" => &U8X8X3,
         "uint8x8x4_t" => &U8X8X4,
+        "svbool_t" => &SVBOOL,
+        "svfloat32_t" => &SVF32,
+        "svfloat32x2_t" => &SVF32X2,
+        "svfloat32x3_t" => &SVF32X3,
+        "svfloat32x4_t" => &SVF32X4,
+        "svfloat64_t" => &SVF64,
+        "svfloat64x2_t" => &SVF64X2,
+        "svfloat64x3_t" => &SVF64X3,
+        "svfloat64x4_t" => &SVF64X4,
+        "svint8_t" => &SVI8,
+        "svint8x2_t" => &SVI8X2,
+        "svint8x3_t" => &SVI8X3,
+        "svint8x4_t" => &SVI8X4,
+        "svint16_t" => &SVI16,
+        "svint16x2_t" => &SVI16X2,
+        "svint16x3_t" => &SVI16X3,
+        "svint16x4_t" => &SVI16X4,
+        "svint32_t" => &SVI32,
+        "svint32x2_t" => &SVI32X2,
+        "svint32x3_t" => &SVI32X3,
+        "svint32x4_t" => &SVI32X4,
+        "svint64_t" => &SVI64,
+        "svint64x2_t" => &SVI64X2,
+        "svint64x3_t" => &SVI64X3,
+        "svint64x4_t" => &SVI64X4,
+        "svuint8_t" => &SVU8,
+        "svuint8x2_t" => &SVU8X2,
+        "svuint8x3_t" => &SVU8X3,
+        "svuint8x4_t" => &SVU8X4,
+        "svuint16_t" => &SVU16,
+        "svuint16x2_t" => &SVU16X2,
+        "svuint16x3_t" => &SVU16X3,
+        "svuint16x4_t" => &SVU16X4,
+        "svuint32_t" => &SVU32,
+        "svuint32x2_t" => &SVU32X2,
+        "svuint32x3_t" => &SVU32X3,
+        "svuint32x4_t" => &SVU32X4,
+        "svuint64_t" => &SVU64,
+        "svuint64x2_t" => &SVU64X2,
+        "svuint64x3_t" => &SVU64X3,
+        "svuint64x4_t" => &SVU64X4,
+        "enum svprfop" => &SVPRFOP,
+        "enum svpattern" => &SVPATTERN,
 
         _ => panic!("failed to parse json type {s:?}"),
     }
diff --git a/intrinsics_data/arm_intrinsics.json b/intrinsics_data/arm_intrinsics.json
index a463564932..dedb98092d 100644
--- a/intrinsics_data/arm_intrinsics.json
+++ b/intrinsics_data/arm_intrinsics.json
@@ -7,6 +7,7 @@
       "uint8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -35,6 +36,7 @@
       "uint8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -63,6 +65,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -91,6 +94,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -119,6 +123,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -147,6 +152,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -175,6 +181,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -203,6 +210,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -223,6 +231,186920 @@
       ]
     ]
   },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABA"
+      ],
+      [
+        "MOVPRFX",
+        "SABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaba[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABA"
+      ],
+      [
+        "MOVPRFX",
+        "UABA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALB"
+      ],
+      [
+        "MOVPRFX",
+        "SABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALB"
+      ],
+      [
+        "MOVPRFX",
+        "UABALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABALT"
+      ],
+      [
+        "MOVPRFX",
+        "SABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabalt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABALT"
+      ],
+      [
+        "MOVPRFX",
+        "UABALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABD"
+      ],
+      [
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABD"
+      ],
+      [
+        "MOVPRFX",
+        "FABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABD"
+      ],
+      [
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SABD"
+      ],
+      [
+        "MOVPRFX",
+        "SABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABD"
+      ],
+      [
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UABD"
+      ],
+      [
+        "MOVPRFX",
+        "UABD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlb[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svabdlt[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UABDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABS"
+      ],
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABS"
+      ],
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABS"
+      ],
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FABS"
+      ],
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ABS"
+      ],
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svabs[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacge[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacge[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacge[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacge[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacgt[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacgt[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacgt[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacgt[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacle[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacle[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacle[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svacle[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaclt[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaclt[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaclt[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaclt[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FACGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADALP"
+      ],
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADALP"
+      ],
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadalp[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UADALP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLB"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLB"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLB"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLB"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLT"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLT"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLT"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svadclt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADCLT"
+      ],
+      [
+        "MOVPRFX",
+        "ADCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ADD"
+      ],
+      [
+        "MOVPRFX",
+        "ADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadda[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t initial",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "initial": {
+        "register": "Stied"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadda[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t initial",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "initial": {
+        "register": "Dtied"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddhnt[_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlb[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlbt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddlt[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDP"
+      ],
+      [
+        "MOVPRFX",
+        "FADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDP"
+      ],
+      [
+        "MOVPRFX",
+        "FADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDP"
+      ],
+      [
+        "MOVPRFX",
+        "FADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDP"
+      ],
+      [
+        "MOVPRFX",
+        "FADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddp[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADDP"
+      ],
+      [
+        "MOVPRFX",
+        "ADDP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svaddv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaddwt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UADDWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrb[_u32base]_[s32]offset",
+    "arguments": [
+      "svuint32_t bases",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrb[_u32base]_[u32]offset",
+    "arguments": [
+      "svuint32_t bases",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrb[_u64base]_[s64]offset",
+    "arguments": [
+      "svuint64_t bases",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrb[_u64base]_[u64]offset",
+    "arguments": [
+      "svuint64_t bases",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrd[_u32base]_[s32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrd[_u32base]_[u32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrd[_u64base]_[s64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrd[_u64base]_[u64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrh[_u32base]_[s32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrh[_u32base]_[u32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrh[_u64base]_[s64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrh[_u64base]_[u64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrw[_u32base]_[s32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrw[_u32base]_[u32]index",
+    "arguments": [
+      "svuint32_t bases",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrw[_u64base]_[s64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svadrw[_u64base]_[u64]index",
+    "arguments": [
+      "svuint64_t bases",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ADR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaesd[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AESD"
+      ],
+      [
+        "AESD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaese[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AESE"
+      ],
+      [
+        "AESE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaesimc[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AESIMC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svaesmc[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AESMC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "UXTH"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "UXTH"
+      ],
+      [
+        "UXTW"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "UXTW"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "UXTH"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "UXTH"
+      ],
+      [
+        "UXTW"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "UXTW"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svand[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "AND"
+      ],
+      [
+        "MOVPRFX",
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svandv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ANDV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASRR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASR"
+      ],
+      [
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasr_wide[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ASRD"
+      ],
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svasrd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ASRD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbcax[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BCAX"
+      ],
+      [
+        "MOVPRFX",
+        "BCAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbdep[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BDEP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbext[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbgrp[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BGRP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ],
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BIC"
+      ],
+      [
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbic[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "BIC"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrka[_b]_m",
+    "arguments": [
+      "svbool_t inactive",
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ptied.B"
+      },
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrka[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrkb[_b]_m",
+    "arguments": [
+      "svbool_t inactive",
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ptied.B"
+      },
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrkb[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrkn[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Ptied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrkpa[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKPA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svbrkpb[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BRKPB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl1n[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL1N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL1N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl2n[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL2N"
+      ],
+      [
+        "MOVPRFX",
+        "BSL2N"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svbsl[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "BSL"
+      ],
+      [
+        "MOVPRFX",
+        "BSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCADD"
+      ],
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCADD"
+      ],
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCADD"
+      ],
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCADD"
+      ],
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcadd[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcadd[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CADD"
+      ],
+      [
+        "MOVPRFX",
+        "CADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcdot[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CDOT"
+      ],
+      [
+        "MOVPRFX",
+        "CDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcdot[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CDOT"
+      ],
+      [
+        "MOVPRFX",
+        "CDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcdot_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CDOT"
+      ],
+      [
+        "MOVPRFX",
+        "CDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcdot_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CDOT"
+      ],
+      [
+        "MOVPRFX",
+        "CDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t fallback",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t fallback",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t fallback",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t fallback",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t fallback",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Htied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t fallback",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t fallback",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t fallback",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Btied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t fallback",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Htied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t fallback",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t fallback",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t fallback",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Btied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t fallback",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Zfallback.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t fallback",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t fallback",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t fallback",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Zfallback.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t fallback",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Zfallback.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t fallback",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t fallback",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclasta[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t fallback",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Zfallback.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTA"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t fallback",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t fallback",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t fallback",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t fallback",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t fallback",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Htied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t fallback",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t fallback",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t fallback",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Btied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t fallback",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Htied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t fallback",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Stied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t fallback",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Dtied|Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t fallback",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Btied|Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t fallback",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Zfallback.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t fallback",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t fallback",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t fallback",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Zfallback.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t fallback",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Zfallback.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t fallback",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Zfallback.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t fallback",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Zfallback.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclastb[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t fallback",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Zfallback.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLASTB"
+      ],
+      [
+        "MOVPRFX",
+        "CLASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLS"
+      ],
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcls[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CLZ"
+      ],
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svclz[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CLZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmla_lane[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FCMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla_lane[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcmla_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMLA"
+      ],
+      [
+        "MOVPRFX",
+        "CMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMEQ"
+      ],
+      [
+        "FCMEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMEQ"
+      ],
+      [
+        "FCMEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ],
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpeq_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPEQ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ],
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ],
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpge_wide[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ],
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ],
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpgt_wide[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLE"
+      ],
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLE"
+      ],
+      [
+        "FCMGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ],
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ],
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmple_wide[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLT"
+      ],
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMLT"
+      ],
+      [
+        "FCMGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ],
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ],
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmplt_wide[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMNE"
+      ],
+      [
+        "FCMNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMNE"
+      ],
+      [
+        "FCMNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_n_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_n_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_n_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_n_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ],
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpne_wide[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CMPNE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpuo[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMUO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpuo[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMUO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpuo[_n_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMUO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcmpuo[_n_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCMUO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNOT"
+      ],
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnot[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNT"
+      ],
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnt[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "CNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntb",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntb_pat",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntd",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntd_pat",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnth",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcnth_pat",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntp_b16",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntp_b32",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntp_b64",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntp_b8",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntw",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcntw_pat",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcompact[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "COMPACT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_f32]",
+    "arguments": [
+      "svfloat32_t x0",
+      "svfloat32_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_f64]",
+    "arguments": [
+      "svfloat64_t x0",
+      "svfloat64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_s16]",
+    "arguments": [
+      "svint16_t x0",
+      "svint16_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_s32]",
+    "arguments": [
+      "svint32_t x0",
+      "svint32_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_s64]",
+    "arguments": [
+      "svint64_t x0",
+      "svint64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_s8]",
+    "arguments": [
+      "svint8_t x0",
+      "svint8_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_u16]",
+    "arguments": [
+      "svuint16_t x0",
+      "svuint16_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_u32]",
+    "arguments": [
+      "svuint32_t x0",
+      "svuint32_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_u64]",
+    "arguments": [
+      "svuint64_t x0",
+      "svuint64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate2[_u8]",
+    "arguments": [
+      "svuint8_t x0",
+      "svuint8_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_f32]",
+    "arguments": [
+      "svfloat32_t x0",
+      "svfloat32_t x1",
+      "svfloat32_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_f64]",
+    "arguments": [
+      "svfloat64_t x0",
+      "svfloat64_t x1",
+      "svfloat64_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_s16]",
+    "arguments": [
+      "svint16_t x0",
+      "svint16_t x1",
+      "svint16_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_s32]",
+    "arguments": [
+      "svint32_t x0",
+      "svint32_t x1",
+      "svint32_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_s64]",
+    "arguments": [
+      "svint64_t x0",
+      "svint64_t x1",
+      "svint64_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_s8]",
+    "arguments": [
+      "svint8_t x0",
+      "svint8_t x1",
+      "svint8_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_u16]",
+    "arguments": [
+      "svuint16_t x0",
+      "svuint16_t x1",
+      "svuint16_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_u32]",
+    "arguments": [
+      "svuint32_t x0",
+      "svuint32_t x1",
+      "svuint32_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_u64]",
+    "arguments": [
+      "svuint64_t x0",
+      "svuint64_t x1",
+      "svuint64_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate3[_u8]",
+    "arguments": [
+      "svuint8_t x0",
+      "svuint8_t x1",
+      "svuint8_t x2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_f32]",
+    "arguments": [
+      "svfloat32_t x0",
+      "svfloat32_t x1",
+      "svfloat32_t x2",
+      "svfloat32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_f64]",
+    "arguments": [
+      "svfloat64_t x0",
+      "svfloat64_t x1",
+      "svfloat64_t x2",
+      "svfloat64_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_s16]",
+    "arguments": [
+      "svint16_t x0",
+      "svint16_t x1",
+      "svint16_t x2",
+      "svint16_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_s32]",
+    "arguments": [
+      "svint32_t x0",
+      "svint32_t x1",
+      "svint32_t x2",
+      "svint32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_s64]",
+    "arguments": [
+      "svint64_t x0",
+      "svint64_t x1",
+      "svint64_t x2",
+      "svint64_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_s8]",
+    "arguments": [
+      "svint8_t x0",
+      "svint8_t x1",
+      "svint8_t x2",
+      "svint8_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_u16]",
+    "arguments": [
+      "svuint16_t x0",
+      "svuint16_t x1",
+      "svuint16_t x2",
+      "svuint16_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_u32]",
+    "arguments": [
+      "svuint32_t x0",
+      "svuint32_t x1",
+      "svuint32_t x2",
+      "svuint32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_u64]",
+    "arguments": [
+      "svuint64_t x0",
+      "svuint64_t x1",
+      "svuint64_t x2",
+      "svuint64_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcreate4[_u8]",
+    "arguments": [
+      "svuint8_t x0",
+      "svuint8_t x1",
+      "svuint8_t x2",
+      "svuint8_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_f64]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVT"
+      ],
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVT"
+      ],
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s64]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u64]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f32[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_f32]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVT"
+      ],
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVT"
+      ],
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s32]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u32]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UCVTF"
+      ],
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_f64[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UCVTF"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f64]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s32[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f32]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZS"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_s64[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f64]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u32[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f32]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTZU"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svcvt_u64[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTZU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtlt_f64[_f32]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.D"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtlt_f64[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtnt_f32[_f64]_m",
+    "arguments": [
+      "svfloat32_t even",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtnt_f32[_f64]_x",
+    "arguments": [
+      "svfloat32_t even",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtx_f32[_f64]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTX"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtx_f32[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTX"
+      ],
+      [
+        "MOVPRFX",
+        "FCVTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtx_f32[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FCVTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtxnt_f32[_f64]_m",
+    "arguments": [
+      "svfloat32_t even",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTXNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svcvtxnt_f32[_f64]_x",
+    "arguments": [
+      "svfloat32_t even",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FCVTXNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIV"
+      ],
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIV"
+      ],
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIV"
+      ],
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdiv[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FDIVR"
+      ],
+      [
+        "FDIV"
+      ],
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "FDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDIVR"
+      ],
+      [
+        "SDIV"
+      ],
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "SDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDIVR"
+      ],
+      [
+        "UDIV"
+      ],
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdivr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UDIVR"
+      ],
+      [
+        "MOVPRFX",
+        "UDIV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdot_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UDOT"
+      ],
+      [
+        "MOVPRFX",
+        "UDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_b16",
+    "arguments": [
+      "bool op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_b32",
+    "arguments": [
+      "bool op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_b64",
+    "arguments": [
+      "bool op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_b8",
+    "arguments": [
+      "bool op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f32",
+    "arguments": [
+      "float32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f32_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "float32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f32_x",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f32_z",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f64",
+    "arguments": [
+      "float64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f64_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "float64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.D"
+      },
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f64_x",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_f64_z",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s16",
+    "arguments": [
+      "int16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s16_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "int16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.H"
+      },
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s16_x",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s16_z",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s32",
+    "arguments": [
+      "int32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s32_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "int32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s32_x",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s32_z",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s64",
+    "arguments": [
+      "int64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s64_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "int64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.D"
+      },
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s64_x",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s64_z",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s8",
+    "arguments": [
+      "int8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s8_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "int8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.B"
+      },
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s8_x",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_s8_z",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u16",
+    "arguments": [
+      "uint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u16_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "uint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.H"
+      },
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u16_x",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u16_z",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Hop|Wop"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u32",
+    "arguments": [
+      "uint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u32_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "uint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u32_x",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u32_z",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Sop|Wop"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u64",
+    "arguments": [
+      "uint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u64_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "uint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.D"
+      },
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u64_x",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u64_z",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Dop|Xop"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u8",
+    "arguments": [
+      "uint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u8_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "uint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Ztied.B"
+      },
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "CPY"
+      ],
+      [
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u8_x",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "FCPY"
+      ],
+      [
+        "FDUP"
+      ],
+      [
+        "DUPM"
+      ],
+      [
+        "DUP"
+      ],
+      [
+        "DUP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup[_n]_u8_z",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Bop|Wop"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CPY"
+      ],
+      [
+        "DUP",
+        "FCPY"
+      ],
+      [
+        "DUP",
+        "CPY"
+      ],
+      [
+        "MOVPRFX",
+        "CPY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_f32]",
+    "arguments": [
+      "svfloat32_t data",
+      "uint32_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index": {
+        "register": "Zindex.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_f64]",
+    "arguments": [
+      "svfloat64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index": {
+        "register": "Zindex.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_s16]",
+    "arguments": [
+      "svint16_t data",
+      "uint16_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "index": {
+        "register": "Zindex.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_s32]",
+    "arguments": [
+      "svint32_t data",
+      "uint32_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index": {
+        "register": "Zindex.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_s64]",
+    "arguments": [
+      "svint64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index": {
+        "register": "Zindex.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_s8]",
+    "arguments": [
+      "svint8_t data",
+      "uint8_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "index": {
+        "register": "Zindex.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_u16]",
+    "arguments": [
+      "svuint16_t data",
+      "uint16_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "index": {
+        "register": "Zindex.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_u32]",
+    "arguments": [
+      "svuint32_t data",
+      "uint32_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index": {
+        "register": "Zindex.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_u64]",
+    "arguments": [
+      "svuint64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index": {
+        "register": "Zindex.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdup_lane[_u8]",
+    "arguments": [
+      "svuint8_t data",
+      "uint8_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "index": {
+        "register": "Zindex.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_b16",
+    "arguments": [
+      "bool x0",
+      "bool x1",
+      "bool x2",
+      "bool x3",
+      "bool x4",
+      "bool x5",
+      "bool x6",
+      "bool x7"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_b32",
+    "arguments": [
+      "bool x0",
+      "bool x1",
+      "bool x2",
+      "bool x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_b64",
+    "arguments": [
+      "bool x0",
+      "bool x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_b8",
+    "arguments": [
+      "bool x0",
+      "bool x1",
+      "bool x2",
+      "bool x3",
+      "bool x4",
+      "bool x5",
+      "bool x6",
+      "bool x7",
+      "bool x8",
+      "bool x9",
+      "bool x10",
+      "bool x11",
+      "bool x12",
+      "bool x13",
+      "bool x14",
+      "bool x15"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_f32",
+    "arguments": [
+      "float32_t x0",
+      "float32_t x1",
+      "float32_t x2",
+      "float32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_f64",
+    "arguments": [
+      "float64_t x0",
+      "float64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_s16",
+    "arguments": [
+      "int16_t x0",
+      "int16_t x1",
+      "int16_t x2",
+      "int16_t x3",
+      "int16_t x4",
+      "int16_t x5",
+      "int16_t x6",
+      "int16_t x7"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_s32",
+    "arguments": [
+      "int32_t x0",
+      "int32_t x1",
+      "int32_t x2",
+      "int32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_s64",
+    "arguments": [
+      "int64_t x0",
+      "int64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_s8",
+    "arguments": [
+      "int8_t x0",
+      "int8_t x1",
+      "int8_t x2",
+      "int8_t x3",
+      "int8_t x4",
+      "int8_t x5",
+      "int8_t x6",
+      "int8_t x7",
+      "int8_t x8",
+      "int8_t x9",
+      "int8_t x10",
+      "int8_t x11",
+      "int8_t x12",
+      "int8_t x13",
+      "int8_t x14",
+      "int8_t x15"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_u16",
+    "arguments": [
+      "uint16_t x0",
+      "uint16_t x1",
+      "uint16_t x2",
+      "uint16_t x3",
+      "uint16_t x4",
+      "uint16_t x5",
+      "uint16_t x6",
+      "uint16_t x7"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_u32",
+    "arguments": [
+      "uint32_t x0",
+      "uint32_t x1",
+      "uint32_t x2",
+      "uint32_t x3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_u64",
+    "arguments": [
+      "uint64_t x0",
+      "uint64_t x1"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq[_n]_u8",
+    "arguments": [
+      "uint8_t x0",
+      "uint8_t x1",
+      "uint8_t x2",
+      "uint8_t x3",
+      "uint8_t x4",
+      "uint8_t x5",
+      "uint8_t x6",
+      "uint8_t x7",
+      "uint8_t x8",
+      "uint8_t x9",
+      "uint8_t x10",
+      "uint8_t x11",
+      "uint8_t x12",
+      "uint8_t x13",
+      "uint8_t x14",
+      "uint8_t x15"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_f32]",
+    "arguments": [
+      "svfloat32_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_f64]",
+    "arguments": [
+      "svfloat64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_s16]",
+    "arguments": [
+      "svint16_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_s32]",
+    "arguments": [
+      "svint32_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_s64]",
+    "arguments": [
+      "svint64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_s8]",
+    "arguments": [
+      "svint8_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_u16]",
+    "arguments": [
+      "svuint16_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_u32]",
+    "arguments": [
+      "svuint32_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_u64]",
+    "arguments": [
+      "svuint64_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svdupq_lane[_u8]",
+    "arguments": [
+      "svuint8_t data",
+      "uint64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D|Zdata.Q"
+      },
+      "{2 * index, 2 * index + 1, 2 * index, 2 * index + 1, …}": {
+        "register": "Zindices_d.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "DUP"
+      ],
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveor3[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "EOR3"
+      ],
+      [
+        "MOVPRFX",
+        "EOR3"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ],
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveor[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "EOR"
+      ],
+      [
+        "MOVPRFX",
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_s16]",
+    "arguments": [
+      "svint16_t odd",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_s32]",
+    "arguments": [
+      "svint32_t odd",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_s64]",
+    "arguments": [
+      "svint64_t odd",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_s8]",
+    "arguments": [
+      "svint8_t odd",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_u16]",
+    "arguments": [
+      "svuint16_t odd",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_u32]",
+    "arguments": [
+      "svuint32_t odd",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_u64]",
+    "arguments": [
+      "svuint64_t odd",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_n_u8]",
+    "arguments": [
+      "svuint8_t odd",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_s16]",
+    "arguments": [
+      "svint16_t odd",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_s32]",
+    "arguments": [
+      "svint32_t odd",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_s64]",
+    "arguments": [
+      "svint64_t odd",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_s8]",
+    "arguments": [
+      "svint8_t odd",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_u16]",
+    "arguments": [
+      "svuint16_t odd",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_u32]",
+    "arguments": [
+      "svuint32_t odd",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_u64]",
+    "arguments": [
+      "svuint64_t odd",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveorbt[_u8]",
+    "arguments": [
+      "svuint8_t odd",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "odd": {
+        "register": "Zodd.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORBT"
+      ],
+      [
+        "MOVPRFX",
+        "EORBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_s16]",
+    "arguments": [
+      "svint16_t even",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_s32]",
+    "arguments": [
+      "svint32_t even",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_s64]",
+    "arguments": [
+      "svint64_t even",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_s8]",
+    "arguments": [
+      "svint8_t even",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_u16]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_u32]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_u64]",
+    "arguments": [
+      "svuint64_t even",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_n_u8]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_s16]",
+    "arguments": [
+      "svint16_t even",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_s32]",
+    "arguments": [
+      "svint32_t even",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_s64]",
+    "arguments": [
+      "svint64_t even",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_s8]",
+    "arguments": [
+      "svint8_t even",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_u16]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.H|Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_u32]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.S|Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_u64]",
+    "arguments": [
+      "svuint64_t even",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.D|Ztied.D"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "sveortb[_u8]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Zeven.B|Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORTB"
+      ],
+      [
+        "MOVPRFX",
+        "EORTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "sveorv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexpa[_f32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FEXPA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexpa[_f64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FEXPA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 127
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 255
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 127
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svext[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 255
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EXT"
+      ],
+      [
+        "MOVPRFX",
+        "EXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTB"
+      ],
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTB"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextb[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTH"
+      ],
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTH"
+      ],
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTH"
+      ],
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTH"
+      ],
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTH"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTH"
+      ],
+      [
+        "MOVPRFX",
+        "UXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTH"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svexth[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTW"
+      ],
+      [
+        "MOVPRFX",
+        "SXTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SXTW"
+      ],
+      [
+        "MOVPRFX",
+        "SXTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SXTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTW"
+      ],
+      [
+        "MOVPRFX",
+        "UXTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UXTW"
+      ],
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svextw[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UXTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_f32]",
+    "arguments": [
+      "svfloat32x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_f64]",
+    "arguments": [
+      "svfloat64x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_s16]",
+    "arguments": [
+      "svint16x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_s32]",
+    "arguments": [
+      "svint32x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_s64]",
+    "arguments": [
+      "svint64x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_s8]",
+    "arguments": [
+      "svint8x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_u16]",
+    "arguments": [
+      "svuint16x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_u32]",
+    "arguments": [
+      "svuint32x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_u64]",
+    "arguments": [
+      "svuint64x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget2[_u8]",
+    "arguments": [
+      "svuint8x2_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_f32]",
+    "arguments": [
+      "svfloat32x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_f64]",
+    "arguments": [
+      "svfloat64x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_s16]",
+    "arguments": [
+      "svint16x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_s32]",
+    "arguments": [
+      "svint32x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_s64]",
+    "arguments": [
+      "svint64x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_s8]",
+    "arguments": [
+      "svint8x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_u16]",
+    "arguments": [
+      "svuint16x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_u32]",
+    "arguments": [
+      "svuint32x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_u64]",
+    "arguments": [
+      "svuint64x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget3[_u8]",
+    "arguments": [
+      "svuint8x3_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_f32]",
+    "arguments": [
+      "svfloat32x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_f64]",
+    "arguments": [
+      "svfloat64x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_s16]",
+    "arguments": [
+      "svint16x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_s32]",
+    "arguments": [
+      "svint32x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_s64]",
+    "arguments": [
+      "svint64x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_s8]",
+    "arguments": [
+      "svint8x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_u16]",
+    "arguments": [
+      "svuint16x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_u32]",
+    "arguments": [
+      "svuint32x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_u64]",
+    "arguments": [
+      "svuint64x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svget4[_u8]",
+    "arguments": [
+      "svuint8x4_t tuple",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHADD"
+      ],
+      [
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHADD"
+      ],
+      [
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhadd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHADD"
+      ],
+      [
+        "MOVPRFX",
+        "UHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistcnt[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTCNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistcnt[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTCNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistcnt[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTCNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistcnt[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTCNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistseg[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTSEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhistseg[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "HISTSEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUB"
+      ],
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUB"
+      ],
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsub[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHSUBR"
+      ],
+      [
+        "SHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UHSUBR"
+      ],
+      [
+        "UHSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svhsubr[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UHSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UHSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_s16",
+    "arguments": [
+      "int16_t base",
+      "int16_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_s32",
+    "arguments": [
+      "int32_t base",
+      "int32_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_s64",
+    "arguments": [
+      "int64_t base",
+      "int64_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "step": {
+        "register": "Xstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_s8",
+    "arguments": [
+      "int8_t base",
+      "int8_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_u16",
+    "arguments": [
+      "uint16_t base",
+      "uint16_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_u32",
+    "arguments": [
+      "uint32_t base",
+      "uint32_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_u64",
+    "arguments": [
+      "uint64_t base",
+      "uint64_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "step": {
+        "register": "Xstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svindex_u8",
+    "arguments": [
+      "uint8_t base",
+      "uint8_t step"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Wbase"
+      },
+      "step": {
+        "register": "Wstep"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ],
+      [
+        "INDEX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Sop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Dop2|Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Hop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Sop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Dop2|Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Bop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Hop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Sop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Dop2|Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svinsr[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Bop2|Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "INSR"
+      ],
+      [
+        "INSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlasta[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTA"
+      ],
+      [
+        "LASTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlastb[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LASTB"
+      ],
+      [
+        "LASTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_index_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_offset_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_index_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_offset_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_gather_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1D"
+      ],
+      [
+        "LD1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROH"
+      ],
+      [
+        "LD1ROH"
+      ],
+      [
+        "LD1ROH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROB"
+      ],
+      [
+        "LD1ROB"
+      ],
+      [
+        "LD1ROB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROH"
+      ],
+      [
+        "LD1ROH"
+      ],
+      [
+        "LD1ROH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ],
+      [
+        "LD1ROW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ],
+      [
+        "LD1ROD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ro[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1ROB"
+      ],
+      [
+        "LD1ROB"
+      ],
+      [
+        "LD1ROB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQH"
+      ],
+      [
+        "LD1RQH"
+      ],
+      [
+        "LD1RQH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQB"
+      ],
+      [
+        "LD1RQB"
+      ],
+      [
+        "LD1RQB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQH"
+      ],
+      [
+        "LD1RQH"
+      ],
+      [
+        "LD1RQH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ],
+      [
+        "LD1RQW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ],
+      [
+        "LD1RQD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1rq[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1RQB"
+      ],
+      [
+        "LD1RQB"
+      ],
+      [
+        "LD1RQB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sb_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SB"
+      ],
+      [
+        "LD1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SH"
+      ],
+      [
+        "LD1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1sw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1SW"
+      ],
+      [
+        "LD1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1ub_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1B"
+      ],
+      [
+        "LD1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1H"
+      ],
+      [
+        "LD1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld1uw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD1W"
+      ],
+      [
+        "LD1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2H"
+      ],
+      [
+        "LD2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2B"
+      ],
+      [
+        "LD2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2H"
+      ],
+      [
+        "LD2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2B"
+      ],
+      [
+        "LD2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2H"
+      ],
+      [
+        "LD2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2B"
+      ],
+      [
+        "LD2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2H"
+      ],
+      [
+        "LD2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2W"
+      ],
+      [
+        "LD2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2D"
+      ],
+      [
+        "LD2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld2_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD2B"
+      ],
+      [
+        "LD2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3H"
+      ],
+      [
+        "LD3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3B"
+      ],
+      [
+        "LD3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3H"
+      ],
+      [
+        "LD3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3B"
+      ],
+      [
+        "LD3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3H"
+      ],
+      [
+        "LD3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3B"
+      ],
+      [
+        "LD3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3H"
+      ],
+      [
+        "LD3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3W"
+      ],
+      [
+        "LD3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3D"
+      ],
+      [
+        "LD3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld3_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD3B"
+      ],
+      [
+        "LD3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4H"
+      ],
+      [
+        "LD4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4B"
+      ],
+      [
+        "LD4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4H"
+      ],
+      [
+        "LD4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4B"
+      ],
+      [
+        "LD4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4H"
+      ],
+      [
+        "LD4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4B"
+      ],
+      [
+        "LD4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4H"
+      ],
+      [
+        "LD4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4W"
+      ],
+      [
+        "LD4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4D"
+      ],
+      [
+        "LD4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svld4_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LD4B"
+      ],
+      [
+        "LD4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_index_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_offset_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_index_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_offset_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ],
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_gather_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ],
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sb_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ],
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ],
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1sw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ],
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1ub_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u32]index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u32]index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ],
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ],
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldff1uw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDFF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ],
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 8": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ],
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ],
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 8": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ],
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntb()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ],
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 8": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1D"
+      ],
+      [
+        "LDNF1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntb()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sb_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SB"
+      ],
+      [
+        "LDNF1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ],
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ],
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ],
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SH"
+      ],
+      [
+        "LDNF1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SW"
+      ],
+      [
+        "LDNF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1sw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1SW"
+      ],
+      [
+        "LDNF1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_s16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_u16",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcnth()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1ub_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd()": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1B"
+      ],
+      [
+        "LDNF1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_vnum_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_vnum_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntw() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uh_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 2": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1H"
+      ],
+      [
+        "LDNF1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uw_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uw_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uw_vnum_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ],
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnf1uw_vnum_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "base + vnum * svcntd() * 4": {
+        "register": "Xptr"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNF1W"
+      ],
+      [
+        "LDNF1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ],
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ],
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ],
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ],
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_index_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_offset_f32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_index_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_offset_f64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1_gather_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "const float32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "const float64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ],
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "const int64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ],
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ],
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ],
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint64_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1D"
+      ],
+      [
+        "LDNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svldnt1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "int64_t vnum"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ],
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sb_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1sw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const int32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1SW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1ub_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint8_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_index_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_index_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_s32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u32base]_u32",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u32]offset_s32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u32]offset_u32",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint32_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uh_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint16_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_s64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather[_u64base]_u64",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[s64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[s64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[s64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[s64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[u64]index_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[u64]index_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[u64]offset_s64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svldnt1uw_gather_[u64]offset_u64",
+    "arguments": [
+      "svbool_t pg",
+      "const uint32_t *base",
+      "svuint64_t offsets"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LDNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlen[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "CNTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FLOGB"
+      ],
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FLOGB"
+      ],
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FLOGB"
+      ],
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FLOGB"
+      ],
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svlogb[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FLOGB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSLR"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsl_wide[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSRR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSRR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSR"
+      ],
+      [
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svlsr_wide[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmad[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmatch[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmatch[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmatch[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmatch[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ],
+      [
+        "MOVPRFX",
+        "FMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAX"
+      ],
+      [
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMAX"
+      ],
+      [
+        "MOVPRFX",
+        "SMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAX"
+      ],
+      [
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmax[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMAX"
+      ],
+      [
+        "MOVPRFX",
+        "UMAX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnm[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxnmp[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxnmp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxnmp[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxnmp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnmv[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxnmv[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXNMV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "FMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "SMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmaxp[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXP"
+      ],
+      [
+        "MOVPRFX",
+        "UMAXP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmaxv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMAXV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ],
+      [
+        "MOVPRFX",
+        "FMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMIN"
+      ],
+      [
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMIN"
+      ],
+      [
+        "MOVPRFX",
+        "SMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMIN"
+      ],
+      [
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmin[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMIN"
+      ],
+      [
+        "MOVPRFX",
+        "UMIN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnm[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminnmp[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminnmp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminnmp[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminnmp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINNMP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnmv[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminnmv[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINNMV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINP"
+      ],
+      [
+        "MOVPRFX",
+        "FMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINP"
+      ],
+      [
+        "MOVPRFX",
+        "SMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svminp[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINP"
+      ],
+      [
+        "MOVPRFX",
+        "UMINP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "float32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "float64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svminv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMINV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ],
+      [
+        "MOVPRFX",
+        "MAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla_lane[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmla_lane[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmla_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLA"
+      ],
+      [
+        "MOVPRFX",
+        "MLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalb_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlalt_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls_lane[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmls_lane[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmls_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslb_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmlslt_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "UMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmmla[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmmla[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FMMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmmla[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMMLA"
+      ],
+      [
+        "MOVPRFX",
+        "SMMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmmla[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMMLA"
+      ],
+      [
+        "MOVPRFX",
+        "UMMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmov[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "AND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_s16]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_s32]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_s64]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_u16]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_u32]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlb[_u64]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_s16]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_s32]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_s64]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_u16]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_u32]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmovlt[_u64]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMSB"
+      ],
+      [
+        "FMSB"
+      ],
+      [
+        "FMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]|Ztied3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]|Ztied3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "op3": {
+        "register": "Zop3.H|Ztied3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "op3": {
+        "register": "Zop3.B|Ztied3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MSB"
+      ],
+      [
+        "MSB"
+      ],
+      [
+        "MLS"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmsb[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MSB"
+      ],
+      [
+        "MOVPRFX",
+        "MLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ],
+      [
+        "MOVPRFX",
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ],
+      [
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "MUL"
+      ],
+      [
+        "MOVPRFX",
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul_lane[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmul_lane[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmul_lane[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULH"
+      ],
+      [
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SMULH"
+      ],
+      [
+        "MOVPRFX",
+        "SMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULH"
+      ],
+      [
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulh[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UMULH"
+      ],
+      [
+        "MOVPRFX",
+        "UMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb_lane[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb_lane[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb_lane[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullb_lane[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt_lane[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt_lane[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt_lane[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svmullt_lane[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FMULX"
+      ],
+      [
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svmulx[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FMULX"
+      ],
+      [
+        "MOVPRFX",
+        "FMULX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnand[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NAND"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "svuint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnbsl[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NBSL"
+      ],
+      [
+        "MOVPRFX",
+        "NBSL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNEG"
+      ],
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNEG"
+      ],
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNEG"
+      ],
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNEG"
+      ],
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NEG"
+      ],
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svneg[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmad[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnmatch[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NMATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnmatch[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NMATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnmatch[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NMATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svnmatch[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NMATCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLA"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmla[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLA"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FNMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMLS"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmls[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S|Ztied3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "svfloat32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D|Ztied3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "svfloat64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]|Ztied3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "float32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]|Ztied3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMSB"
+      ],
+      [
+        "FNMLS"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnmsb[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "float64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMSB"
+      ],
+      [
+        "MOVPRFX",
+        "FNMLS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnor[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "EOR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "NOT"
+      ],
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svnot[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "NOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorn[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_b]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ],
+      [
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorr[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "ORR"
+      ],
+      [
+        "MOVPRFX",
+        "ORR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "int16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "int8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "uint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svorv[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "uint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ORV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpfalse[_b]",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PFALSE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpfirst[_b]",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ptied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PFIRST"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmul[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmul[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullb_pair[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svpmullt_pair[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpnext_b16",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ptied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PNEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpnext_b32",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ptied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PNEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpnext_b64",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ptied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PNEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svpnext_b8",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Ptied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PNEXT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather[_u32base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather[_u32base]_offset",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather[_u64base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather[_u64base]_offset",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather_[s32]offset",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint32_t offsets",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather_[s64]offset",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint64_t offsets",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather_[u32]offset",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint32_t offsets",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_gather_[u64]offset",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint64_t offsets",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfb_vnum",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "int64_t vnum",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFB"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ],
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather[_u32base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather[_u32base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather[_u64base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather[_u64base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather_[s32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather_[s64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather_[u32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_gather_[u64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfd_vnum",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "int64_t vnum",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFD"
+      ],
+      [
+        "PRFD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ],
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather[_u32base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather[_u32base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather[_u64base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather[_u64base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather_[s32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather_[s64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather_[u32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_gather_[u64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfh_vnum",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "int64_t vnum",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFH"
+      ],
+      [
+        "PRFH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ],
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather[_u32base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather[_u32base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather[_u64base]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather[_u64base]_index",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ],
+      [
+        "PRFB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather_[s32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather_[s64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather_[u32]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint32_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_gather_[u64]index",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "svuint64_t indices",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svprfw_vnum",
+    "arguments": [
+      "svbool_t pg",
+      "const void *base",
+      "int64_t vnum",
+      "enum svprfop op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PRFW"
+      ],
+      [
+        "PRFW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptest_any",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "bool"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptest_first",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "bool"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptest_last",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "bool"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_b16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_b32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_b64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_b8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_pat_b16",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_pat_b32",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_pat_b64",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svptrue_pat_b8",
+    "arguments": [
+      "enum svpattern pattern"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PTRUE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQABS"
+      ],
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqabs[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQABS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqadd[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ],
+      [
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqadd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQADD"
+      ],
+      [
+        "MOVPRFX",
+        "UQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqcadd[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQCADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqcadd[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQCADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqcadd[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQCADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqcadd[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQCADD"
+      ],
+      [
+        "MOVPRFX",
+        "SQCADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecb_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECD"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecd_pat[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECD"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECH"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdech_pat[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECH"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s32]_b16",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s32]_b32",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s32]_b64",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s32]_b8",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s64]_b16",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s64]_b32",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s64]_b64",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_s64]_b8",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u32]_b16",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u32]_b32",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u32]_b64",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u32]_b8",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u64]_b16",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u64]_b32",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u64]_b64",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_n_u64]_b8",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecp[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECP"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDECW"
+      ],
+      [
+        "MOVPRFX",
+        "SQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqdecw_pat[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQDECW"
+      ],
+      [
+        "MOVPRFX",
+        "UQDECW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalb_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalbt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlalt_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLALT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLALT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslb_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLB"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslbt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLBT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmlslt_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMLSLT"
+      ],
+      [
+        "MOVPRFX",
+        "SQDMLSLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmulh_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb_lane[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullb_lane[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt_lane[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqdmullt_lane[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQDMULLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincb_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCD"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincd_pat[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCD"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCH"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqinch_pat[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCH"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s32]_b16",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s32]_b32",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s32]_b64",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s32]_b8",
+    "arguments": [
+      "int32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s64]_b16",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s64]_b32",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s64]_b64",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_s64]_b8",
+    "arguments": [
+      "int64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u32]_b16",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u32]_b32",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u32]_b64",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u32]_b8",
+    "arguments": [
+      "uint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Wtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u64]_b16",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u64]_b32",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u64]_b64",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_n_u64]_b8",
+    "arguments": [
+      "uint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Xtied"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_s16]",
+    "arguments": [
+      "svint16_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_s64]",
+    "arguments": [
+      "svint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_u16]",
+    "arguments": [
+      "svuint16_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincp[_u64]",
+    "arguments": [
+      "svuint64_t op",
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCP"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_n_s32]",
+    "arguments": [
+      "int32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "int32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_n_s64]",
+    "arguments": [
+      "int64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "int64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_n_u32]",
+    "arguments": [
+      "uint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "uint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Wtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_n_u64]",
+    "arguments": [
+      "uint64_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "uint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Xtied"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_s32]",
+    "arguments": [
+      "svint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQINCW"
+      ],
+      [
+        "MOVPRFX",
+        "SQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqincw_pat[_u32]",
+    "arguments": [
+      "svuint32_t op",
+      "enum svpattern pattern",
+      "uint64_t imm_factor"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_factor": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQINCW"
+      ],
+      [
+        "MOVPRFX",
+        "UQINCW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQNEG"
+      ],
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqneg[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQNEG"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdcmlah_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index",
+      "uint64_t imm_rotation"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDCMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDCMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlah_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLAH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLAH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "int16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "int32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "int64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "svint16_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "op3": {
+        "register": "Zop3.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "svint32_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmlsh_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "svint64_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMLSH"
+      ],
+      [
+        "MOVPRFX",
+        "SQRDMLSH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh_lane[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrdmulh_lane[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRDMULH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHL"
+      ],
+      [
+        "SQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHL"
+      ],
+      [
+        "UQRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshl[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQRSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunt[_n_s16]",
+    "arguments": [
+      "svuint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunt[_n_s32]",
+    "arguments": [
+      "svuint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqrshrunt[_n_s64]",
+    "arguments": [
+      "svuint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQRSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "ASR"
+      ],
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "ASR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "LSR"
+      ],
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "LSR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHL"
+      ],
+      [
+        "SQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHL"
+      ],
+      [
+        "UQSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshl[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSHL"
+      ],
+      [
+        "MOVPRFX",
+        "UQSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHLU"
+      ],
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshlu[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSHLU"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunt[_n_s16]",
+    "arguments": [
+      "svuint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunt[_n_s32]",
+    "arguments": [
+      "svuint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqshrunt[_n_s64]",
+    "arguments": [
+      "svuint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSHRUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQADD"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svqsub[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsub[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQSUBR"
+      ],
+      [
+        "SQSUB"
+      ],
+      [
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQSUBR"
+      ],
+      [
+        "UQSUB"
+      ],
+      [
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqsubr[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "UQSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "UQSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnb[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtnt[_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQXTNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunb[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunb[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunb[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunt[_s16]",
+    "arguments": [
+      "svuint8_t even",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunt[_s32]",
+    "arguments": [
+      "svuint16_t even",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svqxtunt[_s64]",
+    "arguments": [
+      "svuint32_t even",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQXTUNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svraddhnt[_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RADDHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrax1[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RAX1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrax1[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RAX1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s8]_m",
+    "arguments": [
+      "svint8_t inactive",
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u8]_m",
+    "arguments": [
+      "svuint8_t inactive",
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.B|Ztied.B"
+      },
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B|Ztied.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RBIT"
+      ],
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrbit[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "RBIT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrdffr",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RDFFR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrdffr_z",
+    "arguments": [
+      "svbool_t pg"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RDFFR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpe[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpe[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrecpe[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URECPE"
+      ],
+      [
+        "MOVPRFX",
+        "URECPE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrecpe[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URECPE"
+      ],
+      [
+        "MOVPRFX",
+        "URECPE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrecpe[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URECPE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecps[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecps[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPX"
+      ],
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPX"
+      ],
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPX"
+      ],
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRECPX"
+      ],
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrecpx[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRECPX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f32[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_f64[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s16[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s32[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s64[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_s8[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u16[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u32[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u64[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svreinterpret_u8[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_s16]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_s32]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_s64]",
+    "arguments": [
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_s8]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_u16]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_u32]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_u64]",
+    "arguments": [
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev[_u8]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev_b16",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev_b32",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev_b64",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrev_b8",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REV"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s16]_m",
+    "arguments": [
+      "svint16_t inactive",
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u16]_m",
+    "arguments": [
+      "svuint16_t inactive",
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.H|Ztied.H"
+      },
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H|Ztied.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVB"
+      ],
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevb[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s32]_m",
+    "arguments": [
+      "svint32_t inactive",
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVH"
+      ],
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevh[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVH"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_s64]_m",
+    "arguments": [
+      "svint64_t inactive",
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVW"
+      ],
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVW"
+      ],
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_u64]_m",
+    "arguments": [
+      "svuint64_t inactive",
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVW"
+      ],
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "REVW"
+      ],
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrevw[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "REVW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRHADD"
+      ],
+      [
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ],
+      [
+        "MOVPRFX",
+        "SRHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URHADD"
+      ],
+      [
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrhadd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URHADD"
+      ],
+      [
+        "MOVPRFX",
+        "URHADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTA"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTA"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTA"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTA"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinta[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTI"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTI"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTI"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTI"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrinti[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTM"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTM"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTM"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTM"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintm[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTM"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTN"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTN"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTN"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTN"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintn[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTN"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTP"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTP"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTP"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTP"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintp[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTP"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTX"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTX"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTX"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTX"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintx[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTZ"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTZ"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTZ"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRINTZ"
+      ],
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrintz[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FRINTZ"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "SRSHR"
+      ],
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "LSL"
+      ],
+      [
+        "URSHR"
+      ],
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "LSL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHL"
+      ],
+      [
+        "SRSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHL"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHL"
+      ],
+      [
+        "URSHLR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshl[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHL"
+      ],
+      [
+        "MOVPRFX",
+        "URSHLR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSHR"
+      ],
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SRSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSHR"
+      ],
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSHR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrshrnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrsqrte[_f32]",
+    "arguments": [
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRSQRTE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrsqrte[_f64]",
+    "arguments": [
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRSQRTE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsqrte[_u32]_m",
+    "arguments": [
+      "svuint32_t inactive",
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSQRTE"
+      ],
+      [
+        "MOVPRFX",
+        "URSQRTE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsqrte[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSQRTE"
+      ],
+      [
+        "MOVPRFX",
+        "URSQRTE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsqrte[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "URSQRTE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrsqrts[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRSQRTS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svrsqrts[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FRSQRTS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SRSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SRSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SRSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SRSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSRA"
+      ],
+      [
+        "MOVPRFX",
+        "URSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSRA"
+      ],
+      [
+        "MOVPRFX",
+        "URSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSRA"
+      ],
+      [
+        "MOVPRFX",
+        "URSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsra[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "URSRA"
+      ],
+      [
+        "MOVPRFX",
+        "URSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svrsubhnt[_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "RSUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLB"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLB"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLB"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLB"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLT"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLT"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "svuint32_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "op3": {
+        "register": "Zop3.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLT"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsbclt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "svuint64_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "op3": {
+        "register": "Zop3.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SBCLT"
+      ],
+      [
+        "MOVPRFX",
+        "SBCLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSCALE"
+      ],
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svscale[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSCALE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_b]",
+    "arguments": [
+      "svbool_t pg",
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsel[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_f32]",
+    "arguments": [
+      "svfloat32x2_t tuple",
+      "uint64_t imm_index",
+      "svfloat32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_f64]",
+    "arguments": [
+      "svfloat64x2_t tuple",
+      "uint64_t imm_index",
+      "svfloat64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_s16]",
+    "arguments": [
+      "svint16x2_t tuple",
+      "uint64_t imm_index",
+      "svint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_s32]",
+    "arguments": [
+      "svint32x2_t tuple",
+      "uint64_t imm_index",
+      "svint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_s64]",
+    "arguments": [
+      "svint64x2_t tuple",
+      "uint64_t imm_index",
+      "svint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_s8]",
+    "arguments": [
+      "svint8x2_t tuple",
+      "uint64_t imm_index",
+      "svint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_u16]",
+    "arguments": [
+      "svuint16x2_t tuple",
+      "uint64_t imm_index",
+      "svuint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_u32]",
+    "arguments": [
+      "svuint32x2_t tuple",
+      "uint64_t imm_index",
+      "svuint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_u64]",
+    "arguments": [
+      "svuint64x2_t tuple",
+      "uint64_t imm_index",
+      "svuint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset2[_u8]",
+    "arguments": [
+      "svuint8x2_t tuple",
+      "uint64_t imm_index",
+      "svuint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x2_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 1
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_f32]",
+    "arguments": [
+      "svfloat32x3_t tuple",
+      "uint64_t imm_index",
+      "svfloat32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_f64]",
+    "arguments": [
+      "svfloat64x3_t tuple",
+      "uint64_t imm_index",
+      "svfloat64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_s16]",
+    "arguments": [
+      "svint16x3_t tuple",
+      "uint64_t imm_index",
+      "svint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_s32]",
+    "arguments": [
+      "svint32x3_t tuple",
+      "uint64_t imm_index",
+      "svint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_s64]",
+    "arguments": [
+      "svint64x3_t tuple",
+      "uint64_t imm_index",
+      "svint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_s8]",
+    "arguments": [
+      "svint8x3_t tuple",
+      "uint64_t imm_index",
+      "svint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_u16]",
+    "arguments": [
+      "svuint16x3_t tuple",
+      "uint64_t imm_index",
+      "svuint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_u32]",
+    "arguments": [
+      "svuint32x3_t tuple",
+      "uint64_t imm_index",
+      "svuint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_u64]",
+    "arguments": [
+      "svuint64x3_t tuple",
+      "uint64_t imm_index",
+      "svuint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset3[_u8]",
+    "arguments": [
+      "svuint8x3_t tuple",
+      "uint64_t imm_index",
+      "svuint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x3_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 2
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_f32]",
+    "arguments": [
+      "svfloat32x4_t tuple",
+      "uint64_t imm_index",
+      "svfloat32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_f64]",
+    "arguments": [
+      "svfloat64x4_t tuple",
+      "uint64_t imm_index",
+      "svfloat64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_s16]",
+    "arguments": [
+      "svint16x4_t tuple",
+      "uint64_t imm_index",
+      "svint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_s32]",
+    "arguments": [
+      "svint32x4_t tuple",
+      "uint64_t imm_index",
+      "svint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_s64]",
+    "arguments": [
+      "svint64x4_t tuple",
+      "uint64_t imm_index",
+      "svint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_s8]",
+    "arguments": [
+      "svint8x4_t tuple",
+      "uint64_t imm_index",
+      "svint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_u16]",
+    "arguments": [
+      "svuint16x4_t tuple",
+      "uint64_t imm_index",
+      "svuint16_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_u32]",
+    "arguments": [
+      "svuint32x4_t tuple",
+      "uint64_t imm_index",
+      "svuint32_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_u64]",
+    "arguments": [
+      "svuint64x4_t tuple",
+      "uint64_t imm_index",
+      "svuint64_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svset4[_u8]",
+    "arguments": [
+      "svuint8x4_t tuple",
+      "uint64_t imm_index",
+      "svuint8_t x"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x4_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsetffr",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {},
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SETFFR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshllt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USHLLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svshrnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t imm2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "imm2": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SHRNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 15
+      },
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 31
+      },
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 63
+      },
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsli[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SLI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsm4e[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SM4E"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsm4ekey[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SM4EKEY"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsplice[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SPLICE"
+      ],
+      [
+        "MOVPRFX",
+        "SPLICE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UQADD"
+      ],
+      [
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USQADD"
+      ],
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsqadd[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "USQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f32]_m",
+    "arguments": [
+      "svfloat32_t inactive",
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.S|Ztied.S"
+      },
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSQRT"
+      ],
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S|Ztied.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSQRT"
+      ],
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f64]_m",
+    "arguments": [
+      "svfloat64_t inactive",
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "inactive": {
+        "register": "Zinactive.D|Ztied.D"
+      },
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSQRT"
+      ],
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D|Ztied.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSQRT"
+      ],
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsqrt[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSQRT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSRA"
+      ],
+      [
+        "MOVPRFX",
+        "SSRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USRA"
+      ],
+      [
+        "MOVPRFX",
+        "USRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USRA"
+      ],
+      [
+        "MOVPRFX",
+        "USRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USRA"
+      ],
+      [
+        "MOVPRFX",
+        "USRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsra[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USRA"
+      ],
+      [
+        "MOVPRFX",
+        "USRA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsri[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SRI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svint32_t indices",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32_t indices",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint32_t indices",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svint32_t offsets",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svint64_t indices",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svint64_t offsets",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svuint32_t indices",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint32_t indices",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32_t indices",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svuint32_t offsets",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svuint64_t indices",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svuint64_t offsets",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "int64_t vnum",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "int64_t vnum",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "int64_t vnum",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "int64_t vnum",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1D"
+      ],
+      [
+        "ST1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[s32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[s32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1b_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1B"
+      ],
+      [
+        "ST1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base]_index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base]_index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint32_t indices",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint32_t indices",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u32]index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint32_t indices",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u32]index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint32_t indices",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1h_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1H"
+      ],
+      [
+        "ST1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst1w_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST1W"
+      ],
+      [
+        "ST1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svfloat32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svfloat64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint16x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2H"
+      ],
+      [
+        "ST2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint8x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2B"
+      ],
+      [
+        "ST2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint16x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2H"
+      ],
+      [
+        "ST2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint8x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2B"
+      ],
+      [
+        "ST2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "int64_t vnum",
+      "svfloat32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "int64_t vnum",
+      "svfloat64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint16x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2H"
+      ],
+      [
+        "ST2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "int64_t vnum",
+      "svint64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint8x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2B"
+      ],
+      [
+        "ST2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint16x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2H"
+      ],
+      [
+        "ST2H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint32x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2W"
+      ],
+      [
+        "ST2W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "int64_t vnum",
+      "svuint64x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2D"
+      ],
+      [
+        "ST2D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst2_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint8x2_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST2B"
+      ],
+      [
+        "ST2B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svfloat32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svfloat64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint16x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata2.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3H"
+      ],
+      [
+        "ST3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint8x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata2.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3B"
+      ],
+      [
+        "ST3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint16x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata2.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3H"
+      ],
+      [
+        "ST3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint8x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata2.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3B"
+      ],
+      [
+        "ST3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "int64_t vnum",
+      "svfloat32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "int64_t vnum",
+      "svfloat64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint16x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata2.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3H"
+      ],
+      [
+        "ST3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "int64_t vnum",
+      "svint64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint8x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata2.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3B"
+      ],
+      [
+        "ST3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint16x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata2.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3H"
+      ],
+      [
+        "ST3H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint32x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata2.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3W"
+      ],
+      [
+        "ST3W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "int64_t vnum",
+      "svuint64x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata2.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3D"
+      ],
+      [
+        "ST3D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst3_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint8x3_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata2.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST3B"
+      ],
+      [
+        "ST3B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svfloat32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svfloat64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint16x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata3.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4H"
+      ],
+      [
+        "ST4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint8x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata3.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4B"
+      ],
+      [
+        "ST4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint16x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata3.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4H"
+      ],
+      [
+        "ST4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint8x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata3.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4B"
+      ],
+      [
+        "ST4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "int64_t vnum",
+      "svfloat32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "int64_t vnum",
+      "svfloat64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint16x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata3.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4H"
+      ],
+      [
+        "ST4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "int64_t vnum",
+      "svint64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint8x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata3.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4B"
+      ],
+      [
+        "ST4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint16x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.H - Zdata3.H}"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4H"
+      ],
+      [
+        "ST4H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint32x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.S - Zdata3.S}"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4W"
+      ],
+      [
+        "ST4W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "int64_t vnum",
+      "svuint64x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.D - Zdata3.D}"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4D"
+      ],
+      [
+        "ST4D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svst4_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint8x4_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "{Zdata0.B - Zdata3.B}"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ST4B"
+      ],
+      [
+        "ST4B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ],
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ],
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ],
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ],
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_index[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 8": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svint64_t indices",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svint64_t offsets",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u32]offset[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "svuint32_t offsets",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]index[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svuint64_t indices",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 8": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]offset[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "svuint64_t offsets",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_f32]",
+    "arguments": [
+      "svbool_t pg",
+      "float32_t *base",
+      "int64_t vnum",
+      "svfloat32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_f64]",
+    "arguments": [
+      "svbool_t pg",
+      "float64_t *base",
+      "int64_t vnum",
+      "svfloat64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_s16]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "int64_t vnum",
+      "svint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ],
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "int64_t vnum",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int64_t *base",
+      "int64_t vnum",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_s8]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "int64_t vnum",
+      "svint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ],
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_u16]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "int64_t vnum",
+      "svuint16_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      },
+      "vnum * svcnth()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ],
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "int64_t vnum",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      },
+      "vnum * svcntw()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ],
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint64_t *base",
+      "int64_t vnum",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      },
+      "vnum * svcntd()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1D"
+      ],
+      [
+        "STNT1D"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svstnt1_vnum[_u8]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "int64_t vnum",
+      "svuint8_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      },
+      "vnum * svcntb()": {
+        "register": "Xindex"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ],
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int8_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1b_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint8_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1B"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base]_index[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base]_index[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t index",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base]_offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base]_offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "int64_t offset",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u32base_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t bases",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.S"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 2": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u32]offset[_s32]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint32_t offsets",
+      "svint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u32]offset[_u32]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint32_t offsets",
+      "svuint32_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.S"
+      },
+      "offsets": {
+        "register": "Zoffsets.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 2": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int16_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1h_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint16_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1H"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base]_index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base]_index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t index",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "index * 4": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base]_offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base]_offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "int64_t offset",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offset": {
+        "register": "Xoffset"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter[_u64base_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t bases",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "bases": {
+        "register": "Zbases.D"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[s64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[s64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[s64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[s64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[u64]index[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint64_t indices",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[u64]index[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint64_t indices",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices * 4": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[u64]offset[_s64]",
+    "arguments": [
+      "svbool_t pg",
+      "int32_t *base",
+      "svuint64_t offsets",
+      "svint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svstnt1w_scatter_[u64]offset[_u64]",
+    "arguments": [
+      "svbool_t pg",
+      "uint32_t *base",
+      "svuint64_t offsets",
+      "svuint64_t data"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "base": {
+        "register": "Xbase"
+      },
+      "data": {
+        "register": "Zdata.D"
+      },
+      "offsets": {
+        "register": "Zoffsets.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "STNT1W"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUB"
+      ],
+      [
+        "FADD"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FADD"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "ADD"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUB"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsub[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUB"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_n_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_s16]",
+    "arguments": [
+      "svint8_t even",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_s32]",
+    "arguments": [
+      "svint16_t even",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_s64]",
+    "arguments": [
+      "svint32_t even",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_u16]",
+    "arguments": [
+      "svuint8_t even",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.B"
+      },
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_u32]",
+    "arguments": [
+      "svuint16_t even",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.H"
+      },
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubhnt[_u64]",
+    "arguments": [
+      "svuint32_t even",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "even": {
+        "register": "Ztied.S"
+      },
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBHNT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublb[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublbt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLBT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_n_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_u16]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_u32]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsublt[_u64]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBLT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_n_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_n_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_n_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_s16]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_s32]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubltb[_s64]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBLTB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat32_t op1",
+      "float32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUBR"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "FSUB"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_f64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svfloat64_t op1",
+      "float64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUBR"
+      ],
+      [
+        "MOVPRFX",
+        "FSUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]|Ztied2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]|Ztied2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]|Ztied2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]|Ztied2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_n_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUBR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUBR"
+      ],
+      [
+        "SUB"
+      ],
+      [
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsubr[_u8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUBR"
+      ],
+      [
+        "MOVPRFX",
+        "SUB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwb[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWB"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "int8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "int16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SSUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svsubwt[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USUBWT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsudot[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "uint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USDOT"
+      ],
+      [
+        "MOVPRFX",
+        "USDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsudot[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svuint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USDOT"
+      ],
+      [
+        "MOVPRFX",
+        "USDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svsudot_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint8_t op2",
+      "svuint8_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUDOT"
+      ],
+      [
+        "MOVPRFX",
+        "SUDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_f32]",
+    "arguments": [
+      "svfloat32x2_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_f64]",
+    "arguments": [
+      "svfloat64x2_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_s16]",
+    "arguments": [
+      "svint16x2_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_s32]",
+    "arguments": [
+      "svint32x2_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_s64]",
+    "arguments": [
+      "svint64x2_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_s8]",
+    "arguments": [
+      "svint8x2_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_u16]",
+    "arguments": [
+      "svuint16x2_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.H, Zdata1.H}"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_u32]",
+    "arguments": [
+      "svuint32x2_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.S, Zdata1.S}"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_u64]",
+    "arguments": [
+      "svuint64x2_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.D, Zdata1.D}"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbl2[_u8]",
+    "arguments": [
+      "svuint8x2_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "{Zdata0.B, Zdata1.B}"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_f32]",
+    "arguments": [
+      "svfloat32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_f64]",
+    "arguments": [
+      "svfloat64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_s16]",
+    "arguments": [
+      "svint16_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_s32]",
+    "arguments": [
+      "svint32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_s64]",
+    "arguments": [
+      "svint64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_s8]",
+    "arguments": [
+      "svint8_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_u16]",
+    "arguments": [
+      "svuint16_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_u32]",
+    "arguments": [
+      "svuint32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_u64]",
+    "arguments": [
+      "svuint64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtbl[_u8]",
+    "arguments": [
+      "svuint8_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_f32]",
+    "arguments": [
+      "svfloat32_t fallback",
+      "svfloat32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Ztied.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_f64]",
+    "arguments": [
+      "svfloat64_t fallback",
+      "svfloat64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Ztied.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_s16]",
+    "arguments": [
+      "svint16_t fallback",
+      "svint16_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Ztied.H"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_s32]",
+    "arguments": [
+      "svint32_t fallback",
+      "svint32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Ztied.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_s64]",
+    "arguments": [
+      "svint64_t fallback",
+      "svint64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Ztied.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_s8]",
+    "arguments": [
+      "svint8_t fallback",
+      "svint8_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Ztied.B"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_u16]",
+    "arguments": [
+      "svuint16_t fallback",
+      "svuint16_t data",
+      "svuint16_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.H"
+      },
+      "fallback": {
+        "register": "Ztied.H"
+      },
+      "indices": {
+        "register": "Zindices.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_u32]",
+    "arguments": [
+      "svuint32_t fallback",
+      "svuint32_t data",
+      "svuint32_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.S"
+      },
+      "fallback": {
+        "register": "Ztied.S"
+      },
+      "indices": {
+        "register": "Zindices.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_u64]",
+    "arguments": [
+      "svuint64_t fallback",
+      "svuint64_t data",
+      "svuint64_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.D"
+      },
+      "fallback": {
+        "register": "Ztied.D"
+      },
+      "indices": {
+        "register": "Zindices.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svtbx[_u8]",
+    "arguments": [
+      "svuint8_t fallback",
+      "svuint8_t data",
+      "svuint8_t indices"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "data": {
+        "register": "Zdata.B"
+      },
+      "fallback": {
+        "register": "Ztied.B"
+      },
+      "indices": {
+        "register": "Zindices.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TBX"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtmad[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FTMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtmad[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 0,
+        "maximum": 7
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTMAD"
+      ],
+      [
+        "MOVPRFX",
+        "FTMAD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn1q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtrn2q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "TRN2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtsmul[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTSMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtsmul[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTSMUL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtssel[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTSSEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svtssel[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "FTSSEL"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_f32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_f64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_s16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_s32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_s64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_s8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_u16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_u32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_u64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef2_u8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x2_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_f32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_f64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_s16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_s32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_s64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_s8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_u16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_u32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_u64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef3_u8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x3_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_f32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_f64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_s16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_s32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_s64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_s8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_u16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_u32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_u64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef4_u8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8x4_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_f32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_f64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_s16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_s32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_s64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_s8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_u16",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_u32",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_u64",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svundef_u8",
+    "arguments": [],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": null,
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": null
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_b]",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_s16]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_s32]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_s64]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_u16]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_u32]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpkhi[_u64]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_b]",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "PUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_s16]",
+    "arguments": [
+      "svint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_s32]",
+    "arguments": [
+      "svint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_s64]",
+    "arguments": [
+      "svint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_u16]",
+    "arguments": [
+      "svuint8_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_u32]",
+    "arguments": [
+      "svuint16_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svunpklo[_u64]",
+    "arguments": [
+      "svuint32_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Zop.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UUNPKLO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "uint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H[*]"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S[*]"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D[*]"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SQADD"
+      ],
+      [
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_n_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "uint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B[*]"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s16]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s16]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s16]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      },
+      "pg": {
+        "register": "Pg.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s32]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s32]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s32]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      },
+      "pg": {
+        "register": "Pg.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s64]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s64]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s64]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      },
+      "pg": {
+        "register": "Pg.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s8]_m",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s8]_x",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "SUQADD"
+      ],
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svuqadd[_s8]_z",
+    "arguments": [
+      "svbool_t pg",
+      "svint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "pg": {
+        "register": "Pg.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "MOVPRFX",
+        "SUQADD"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svusdot[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svuint8_t op2",
+      "int8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B[*]"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USDOT"
+      ],
+      [
+        "MOVPRFX",
+        "USDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svusdot[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svuint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USDOT"
+      ],
+      [
+        "MOVPRFX",
+        "USDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svusdot_lane[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svuint8_t op2",
+      "svint8_t op3",
+      "uint64_t imm_index"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm_index": {
+        "minimum": 0,
+        "maximum": 3
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USDOT"
+      ],
+      [
+        "MOVPRFX",
+        "USDOT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svusmmla[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svuint8_t op2",
+      "svint8_t op3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      },
+      "op3": {
+        "register": "Zop3.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "USMMLA"
+      ],
+      [
+        "MOVPRFX",
+        "USMMLA"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp1q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svuzp2q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "UZP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b16[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b16[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b16[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b16[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b32[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b32[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b32[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b32[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b64[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b64[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b64[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b64[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b8[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b8[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b8[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilege_b8[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b16[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b16[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b16[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b16[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b32[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b32[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b32[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b32[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b64[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b64[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b64[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b64[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b8[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b8[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEGT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b8[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilegt_b8[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEHI"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b16[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b16[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b16[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b16[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b32[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b32[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b32[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b32[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b64[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b64[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b64[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b64[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b8[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b8[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELE"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b8[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilele_b8[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELS"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b16[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b16[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b16[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b16[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b32[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b32[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b32[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b32[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b64[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b64[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b64[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b64[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b8[_s32]",
+    "arguments": [
+      "int32_t op1",
+      "int32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b8[_s64]",
+    "arguments": [
+      "int64_t op1",
+      "int64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELT"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b8[_u32]",
+    "arguments": [
+      "uint32_t op1",
+      "uint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Wop1"
+      },
+      "op2": {
+        "register": "Wop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwhilelt_b8[_u64]",
+    "arguments": [
+      "uint64_t op1",
+      "uint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILELO"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_f32]",
+    "arguments": [
+      "const float32_t *op1",
+      "const float32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_f64]",
+    "arguments": [
+      "const float64_t *op1",
+      "const float64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_s16]",
+    "arguments": [
+      "const int16_t *op1",
+      "const int16_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_s32]",
+    "arguments": [
+      "const int32_t *op1",
+      "const int32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_s64]",
+    "arguments": [
+      "const int64_t *op1",
+      "const int64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_s8]",
+    "arguments": [
+      "const int8_t *op1",
+      "const int8_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_u16]",
+    "arguments": [
+      "const uint16_t *op1",
+      "const uint16_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_u32]",
+    "arguments": [
+      "const uint32_t *op1",
+      "const uint32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_u64]",
+    "arguments": [
+      "const uint64_t *op1",
+      "const uint64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilerw[_u8]",
+    "arguments": [
+      "const uint8_t *op1",
+      "const uint8_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILERW"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_f32]",
+    "arguments": [
+      "const float32_t *op1",
+      "const float32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_f64]",
+    "arguments": [
+      "const float64_t *op1",
+      "const float64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_s16]",
+    "arguments": [
+      "const int16_t *op1",
+      "const int16_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_s32]",
+    "arguments": [
+      "const int32_t *op1",
+      "const int32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_s64]",
+    "arguments": [
+      "const int64_t *op1",
+      "const int64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_s8]",
+    "arguments": [
+      "const int8_t *op1",
+      "const int8_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_u16]",
+    "arguments": [
+      "const uint16_t *op1",
+      "const uint16_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_u32]",
+    "arguments": [
+      "const uint32_t *op1",
+      "const uint32_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_u64]",
+    "arguments": [
+      "const uint64_t *op1",
+      "const uint64_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svwhilewr[_u8]",
+    "arguments": [
+      "const uint8_t *op1",
+      "const uint8_t *op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Xop1"
+      },
+      "op2": {
+        "register": "Xop2"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WHILEWR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svwrffr",
+    "arguments": [
+      "svbool_t op"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "void"
+    },
+    "Arguments_Preparation": {
+      "op": {
+        "register": "Pop.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "WRFFR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 16
+      },
+      "op1": {
+        "register": "Zop1.H|Ztied1.H"
+      },
+      "op2": {
+        "register": "Zop2.H|Ztied2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 32
+      },
+      "op1": {
+        "register": "Zop1.S|Ztied1.S"
+      },
+      "op2": {
+        "register": "Zop2.S|Ztied2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 64
+      },
+      "op1": {
+        "register": "Zop1.D|Ztied1.D"
+      },
+      "op2": {
+        "register": "Zop2.D|Ztied2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve2",
+    "name": "svxar[_n_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2",
+      "uint64_t imm3"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "imm3": {
+        "minimum": 1,
+        "maximum": 8
+      },
+      "op1": {
+        "register": "Zop1.B|Ztied1.B"
+      },
+      "op2": {
+        "register": "Zop2.B|Ztied2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "XAR"
+      ],
+      [
+        "XAR"
+      ],
+      [
+        "MOVPRFX",
+        "XAR"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip1q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP1"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.H"
+      },
+      "op2": {
+        "register": "Zop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.S"
+      },
+      "op2": {
+        "register": "Zop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.D"
+      },
+      "op2": {
+        "register": "Zop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.B"
+      },
+      "op2": {
+        "register": "Zop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2_b16",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.H"
+      },
+      "op2": {
+        "register": "Pop2.H"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2_b32",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.S"
+      },
+      "op2": {
+        "register": "Pop2.S"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2_b64",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.D"
+      },
+      "op2": {
+        "register": "Pop2.D"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2_b8",
+    "arguments": [
+      "svbool_t op1",
+      "svbool_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svbool_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Pop1.B"
+      },
+      "op2": {
+        "register": "Pop2.B"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_f32]",
+    "arguments": [
+      "svfloat32_t op1",
+      "svfloat32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svfloat32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_f64]",
+    "arguments": [
+      "svfloat64_t op1",
+      "svfloat64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svfloat64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_s16]",
+    "arguments": [
+      "svint16_t op1",
+      "svint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_s32]",
+    "arguments": [
+      "svint32_t op1",
+      "svint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_s64]",
+    "arguments": [
+      "svint64_t op1",
+      "svint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_s8]",
+    "arguments": [
+      "svint8_t op1",
+      "svint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_u16]",
+    "arguments": [
+      "svuint16_t op1",
+      "svuint16_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "16",
+      "value": "svuint16_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_u32]",
+    "arguments": [
+      "svuint32_t op1",
+      "svuint32_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "32",
+      "value": "svuint32_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_u64]",
+    "arguments": [
+      "svuint64_t op1",
+      "svuint64_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "64",
+      "value": "svuint64_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
+  {
+    "SIMD_ISA": "sve",
+    "name": "svzip2q[_u8]",
+    "arguments": [
+      "svuint8_t op1",
+      "svuint8_t op2"
+    ],
+    "return_type": {
+      "element_bit_size": "8",
+      "value": "svuint8_t"
+    },
+    "Arguments_Preparation": {
+      "op1": {
+        "register": "Zop1.Q"
+      },
+      "op2": {
+        "register": "Zop2.Q"
+      }
+    },
+    "Architectures": [
+      "A64"
+    ],
+    "instructions": [
+      [
+        "ZIP2"
+      ]
+    ]
+  },
   {
     "SIMD_ISA": "Neon",
     "name": "vaba_s16",
@@ -232,6 +187154,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -265,6 +187188,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -298,6 +187222,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -331,6 +187256,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -364,6 +187290,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -397,6 +187324,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -430,6 +187358,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -461,6 +187390,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -492,6 +187422,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -523,6 +187454,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -554,6 +187486,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -585,6 +187518,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -616,6 +187550,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -649,6 +187584,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -682,6 +187618,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -715,6 +187652,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -748,6 +187686,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -781,6 +187720,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -814,6 +187754,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -847,6 +187788,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -880,6 +187822,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -913,6 +187856,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -946,6 +187890,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -979,6 +187924,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -1011,6 +187957,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -1040,6 +187987,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -1067,6 +188015,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -1096,6 +188045,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -1125,6 +188075,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -1154,6 +188105,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -1183,6 +188135,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -1212,6 +188165,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -1241,6 +188195,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -1268,6 +188223,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -1295,6 +188251,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -1322,6 +188279,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -1349,6 +188307,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -1376,6 +188335,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -1403,6 +188363,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -1430,6 +188391,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -1459,6 +188421,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -1488,6 +188451,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -1517,6 +188481,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -1546,6 +188511,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -1575,6 +188541,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -1604,6 +188571,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -1633,6 +188601,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -1660,6 +188629,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -1689,6 +188659,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -1718,6 +188689,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -1747,6 +188719,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -1776,6 +188749,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -1805,6 +188779,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -1834,6 +188809,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -1860,6 +188836,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -1885,6 +188862,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -1908,6 +188886,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -1933,6 +188912,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -1958,6 +188938,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -1981,6 +188962,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -2006,6 +188988,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -2029,6 +189012,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -2054,6 +189038,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -2077,6 +189062,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -2102,6 +189088,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -2127,6 +189114,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -2150,6 +189138,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -2176,6 +189165,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -2205,6 +189195,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -2232,6 +189223,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -2261,6 +189253,7 @@
       "poly64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -2290,6 +189283,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -2319,6 +189313,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -2348,6 +189343,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -2377,6 +189373,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -2406,6 +189403,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -2435,6 +189433,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -2464,6 +189463,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -2493,6 +189493,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -2522,6 +189523,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -2551,6 +189553,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -2578,6 +189581,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -2606,6 +189610,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -2637,6 +189642,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -2668,6 +189674,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -2699,6 +189706,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -2730,6 +189738,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -2761,6 +189770,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -2791,6 +189801,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -2820,6 +189831,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -2849,6 +189861,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -2878,6 +189891,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -2907,6 +189921,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -2936,6 +189951,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -2965,6 +189981,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -2992,6 +190009,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -3019,6 +190037,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -3046,6 +190065,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -3073,6 +190093,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -3100,6 +190121,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -3127,6 +190149,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -3156,6 +190179,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -3185,6 +190209,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -3214,6 +190239,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -3243,6 +190269,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -3272,6 +190299,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -3300,6 +190328,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -3323,6 +190352,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -3346,6 +190376,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -3369,6 +190400,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -3392,6 +190424,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -3415,6 +190448,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -3438,6 +190472,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -3461,6 +190496,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -3484,6 +190520,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -3507,6 +190544,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -3530,6 +190568,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -3553,6 +190592,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -3577,6 +190617,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -3606,6 +190647,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -3633,6 +190675,7 @@
       "poly128_t b"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -3662,6 +190705,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -3691,6 +190735,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -3720,6 +190765,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -3749,6 +190795,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -3778,6 +190825,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -3807,6 +190855,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -3836,6 +190885,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -3865,6 +190915,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -3894,6 +190945,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -3923,6 +190975,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -3952,6 +191005,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -3980,6 +191034,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -4003,6 +191058,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -4026,6 +191082,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -4049,6 +191106,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -4072,6 +191130,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -4095,6 +191154,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -4118,6 +191178,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -4141,6 +191202,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -4165,6 +191227,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -4188,6 +191251,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -4211,6 +191275,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -4234,6 +191299,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -4257,6 +191323,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -4280,6 +191347,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -4303,6 +191371,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -4326,6 +191395,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -4349,6 +191419,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -4373,6 +191444,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -4400,6 +191472,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -4427,6 +191500,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -4454,6 +191528,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -4481,6 +191556,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -4508,6 +191584,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -4535,6 +191612,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -4564,6 +191642,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -4593,6 +191672,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -4622,6 +191702,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -4651,6 +191732,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -4680,6 +191762,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -4709,6 +191792,7 @@
       "uint8x16_t key"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -4737,6 +191821,7 @@
       "uint8x16_t key"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -4764,6 +191849,7 @@
       "uint8x16_t data"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -4788,6 +191874,7 @@
       "uint8x16_t data"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -4813,6 +191900,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -4842,6 +191930,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -4871,6 +191960,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -4900,6 +191990,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -4929,6 +192020,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -4958,6 +192050,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -4987,6 +192080,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -5016,6 +192110,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -5045,6 +192140,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -5074,6 +192170,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -5103,6 +192200,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -5132,6 +192230,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -5161,6 +192260,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -5190,6 +192290,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -5219,6 +192320,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -5248,6 +192350,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -5278,6 +192381,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -5305,6 +192409,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -5332,6 +192437,7 @@
       "int64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -5359,6 +192465,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -5386,6 +192493,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -5413,6 +192521,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -5440,6 +192549,7 @@
       "uint64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -5467,6 +192577,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -5493,6 +192604,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -5522,6 +192634,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -5551,6 +192664,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -5580,6 +192694,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -5609,6 +192724,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -5638,6 +192754,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -5667,6 +192784,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -5696,6 +192814,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -5725,6 +192844,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -5754,6 +192874,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -5783,6 +192904,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -5812,6 +192934,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -5841,6 +192964,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -5870,6 +192994,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -5899,6 +193024,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -5928,6 +193054,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -5958,6 +193085,7 @@
       "float32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -5991,6 +193119,7 @@
       "float64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -6022,6 +193151,7 @@
       "poly16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -6055,6 +193185,7 @@
       "poly64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -6087,6 +193218,7 @@
       "poly8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -6120,6 +193252,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -6153,6 +193286,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -6186,6 +193320,7 @@
       "int64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -6219,6 +193354,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -6252,6 +193388,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -6285,6 +193422,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -6318,6 +193456,7 @@
       "uint64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -6351,6 +193490,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -6384,6 +193524,7 @@
       "float32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -6417,6 +193558,7 @@
       "float64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -6448,6 +193590,7 @@
       "poly16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -6481,6 +193624,7 @@
       "poly64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -6513,6 +193657,7 @@
       "poly8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -6546,6 +193691,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -6579,6 +193725,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -6612,6 +193759,7 @@
       "int64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -6645,6 +193793,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -6678,6 +193827,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -6711,6 +193861,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -6744,6 +193895,7 @@
       "uint64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -6777,6 +193929,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -6809,6 +193962,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -6837,6 +193991,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -6865,6 +194020,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -6893,6 +194049,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -6920,6 +194077,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -6948,6 +194106,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -6975,6 +194134,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7004,6 +194164,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7031,6 +194192,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -7058,6 +194220,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -7087,6 +194250,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -7114,6 +194278,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -7141,6 +194306,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7170,6 +194336,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7197,6 +194364,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -7224,6 +194392,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -7253,6 +194422,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -7280,6 +194450,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -7307,6 +194478,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7336,6 +194508,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7363,6 +194536,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -7390,6 +194564,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -7419,6 +194594,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -7446,6 +194622,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -7473,6 +194650,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7502,6 +194680,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7529,6 +194708,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -7556,6 +194736,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -7585,6 +194766,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -7612,6 +194794,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -7639,6 +194822,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7668,6 +194852,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7695,6 +194880,7 @@
       "poly64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7723,6 +194909,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -7752,6 +194939,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -7781,6 +194969,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7810,6 +194999,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7837,6 +195027,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -7866,6 +195057,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -7895,6 +195087,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -7924,6 +195117,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -7951,6 +195145,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -7980,6 +195175,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8007,6 +195203,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8034,6 +195231,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8061,6 +195259,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -8090,6 +195289,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8117,6 +195317,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8145,6 +195346,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -8174,6 +195376,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -8203,6 +195406,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -8232,6 +195436,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8259,6 +195464,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -8288,6 +195494,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -8317,6 +195524,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -8346,6 +195554,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8373,6 +195582,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -8402,6 +195612,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -8428,6 +195639,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -8451,6 +195663,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -8474,6 +195687,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -8498,6 +195712,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -8521,6 +195736,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -8544,6 +195760,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -8567,6 +195784,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -8590,6 +195808,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -8613,6 +195832,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -8636,6 +195856,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -8659,6 +195880,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -8682,6 +195904,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -8705,6 +195928,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8728,6 +195952,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8751,6 +195976,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -8774,6 +196000,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -8797,6 +196024,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8820,6 +196048,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8844,6 +196073,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -8867,6 +196097,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -8890,6 +196121,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -8913,6 +196145,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -8936,6 +196169,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -8959,6 +196193,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -8982,6 +196217,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -9005,6 +196241,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -9028,6 +196265,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -9051,6 +196289,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -9075,6 +196314,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -9104,6 +196344,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -9131,6 +196372,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -9160,6 +196402,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -9189,6 +196432,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -9216,6 +196460,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -9245,6 +196490,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -9274,6 +196520,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -9303,6 +196550,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -9330,6 +196578,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -9359,6 +196608,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -9386,6 +196636,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -9413,6 +196664,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -9440,6 +196692,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -9469,6 +196722,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -9496,6 +196750,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -9525,6 +196780,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -9554,6 +196810,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -9581,6 +196838,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -9610,6 +196868,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -9639,6 +196898,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -9668,6 +196928,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -9695,6 +196956,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -9724,6 +196986,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -9750,6 +197013,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -9773,6 +197037,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -9796,6 +197061,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -9819,6 +197085,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -9842,6 +197109,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -9865,6 +197133,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -9888,6 +197157,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -9911,6 +197181,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -9934,6 +197205,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -9957,6 +197229,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -9980,6 +197253,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -10003,6 +197277,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -10026,6 +197301,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -10049,6 +197325,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -10072,6 +197349,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -10096,6 +197374,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -10125,6 +197404,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -10152,6 +197432,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -10181,6 +197462,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -10210,6 +197492,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -10237,6 +197520,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -10266,6 +197550,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -10295,6 +197580,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -10324,6 +197610,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -10351,6 +197638,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -10380,6 +197668,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -10407,6 +197696,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -10434,6 +197724,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -10461,6 +197752,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -10490,6 +197782,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -10517,6 +197810,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -10546,6 +197840,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -10575,6 +197870,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -10602,6 +197898,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -10631,6 +197928,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -10660,6 +197958,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -10689,6 +197988,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -10716,6 +198016,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -10745,6 +198046,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -10771,6 +198073,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -10794,6 +198097,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -10817,6 +198121,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -10840,6 +198145,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -10863,6 +198169,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -10886,6 +198193,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -10909,6 +198217,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -10932,6 +198241,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -10955,6 +198265,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -10978,6 +198289,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -11001,6 +198313,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -11024,6 +198337,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -11047,6 +198361,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -11070,6 +198385,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -11093,6 +198409,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -11117,6 +198434,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -11146,6 +198464,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -11173,6 +198492,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -11202,6 +198522,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -11231,6 +198552,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -11258,6 +198580,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -11287,6 +198610,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -11316,6 +198640,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -11345,6 +198670,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -11372,6 +198698,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -11401,6 +198728,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -11428,6 +198756,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -11455,6 +198784,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -11482,6 +198812,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -11511,6 +198842,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -11538,6 +198870,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -11567,6 +198900,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -11596,6 +198930,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -11623,6 +198958,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -11652,6 +198988,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -11681,6 +199018,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -11710,6 +199048,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -11737,6 +199076,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -11766,6 +199106,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -11792,6 +199133,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -11815,6 +199157,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -11838,6 +199181,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -11861,6 +199205,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -11884,6 +199229,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -11907,6 +199253,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -11930,6 +199277,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -11953,6 +199301,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -11976,6 +199325,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -11999,6 +199349,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -12022,6 +199373,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -12045,6 +199397,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -12068,6 +199421,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -12091,6 +199445,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -12114,6 +199469,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -12137,6 +199493,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -12162,6 +199519,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -12187,6 +199545,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -12212,6 +199571,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -12237,6 +199597,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -12262,6 +199623,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -12287,6 +199649,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -12312,6 +199675,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -12337,6 +199701,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -12362,6 +199727,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -12387,6 +199753,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -12412,6 +199779,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -12438,6 +199806,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -12467,6 +199836,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -12494,6 +199864,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -12523,6 +199894,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -12552,6 +199924,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -12579,6 +199952,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -12608,6 +199982,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -12637,6 +200012,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -12666,6 +200042,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -12693,6 +200070,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -12722,6 +200100,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -12749,6 +200128,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -12776,6 +200156,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -12803,6 +200184,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -12832,6 +200214,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -12859,6 +200242,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -12888,6 +200272,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -12917,6 +200302,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -12944,6 +200330,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -12973,6 +200360,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -13002,6 +200390,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -13031,6 +200420,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -13058,6 +200448,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -13087,6 +200478,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -13113,6 +200505,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -13136,6 +200529,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -13159,6 +200553,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -13182,6 +200577,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -13205,6 +200601,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -13228,6 +200625,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -13251,6 +200649,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -13274,6 +200673,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -13297,6 +200697,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -13320,6 +200721,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -13343,6 +200745,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -13366,6 +200769,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -13389,6 +200793,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -13412,6 +200817,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -13435,6 +200841,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -13458,6 +200865,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -13483,6 +200891,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -13508,6 +200917,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -13533,6 +200943,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -13558,6 +200969,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -13583,6 +200995,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -13608,6 +201021,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -13633,6 +201047,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -13658,6 +201073,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -13683,6 +201099,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -13708,6 +201125,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -13733,6 +201151,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -13760,6 +201179,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13789,6 +201209,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13822,6 +201243,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13855,6 +201277,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13884,6 +201307,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13917,6 +201341,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13950,6 +201375,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -13979,6 +201405,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -14012,6 +201439,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -14045,6 +201473,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -14074,6 +201503,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -14107,6 +201537,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -14140,6 +201571,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14168,6 +201600,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -14196,6 +201629,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14229,6 +201663,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14261,6 +201696,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14289,6 +201725,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -14317,6 +201754,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14350,6 +201788,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14382,6 +201821,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14410,6 +201850,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -14438,6 +201879,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14471,6 +201913,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14503,6 +201946,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14531,6 +201975,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -14559,6 +202004,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14592,6 +202038,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14622,6 +202069,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -14647,6 +202095,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -14672,6 +202121,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -14697,6 +202147,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -14722,6 +202173,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -14747,6 +202199,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -14773,6 +202226,7 @@
       "float32x2_t high"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -14803,6 +202257,7 @@
       "float64x1_t high"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -14831,6 +202286,7 @@
       "poly16x4_t high"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -14861,6 +202317,7 @@
       "poly64x1_t high"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -14890,6 +202347,7 @@
       "poly8x8_t high"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -14920,6 +202378,7 @@
       "int16x4_t high"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -14950,6 +202409,7 @@
       "int32x2_t high"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -14980,6 +202440,7 @@
       "int64x1_t high"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -15010,6 +202471,7 @@
       "int8x8_t high"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -15040,6 +202502,7 @@
       "uint16x4_t high"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -15070,6 +202533,7 @@
       "uint32x2_t high"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -15100,6 +202564,7 @@
       "uint64x1_t high"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -15130,6 +202595,7 @@
       "uint8x8_t high"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -15162,6 +202628,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -15199,6 +202666,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -15236,6 +202704,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -15273,6 +202742,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -15311,6 +202781,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -15348,6 +202819,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -15385,6 +202857,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -15422,6 +202895,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -15459,6 +202933,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -15496,6 +202971,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -15533,6 +203009,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -15570,6 +203047,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -15607,6 +203085,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -15644,6 +203123,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -15681,6 +203161,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -15718,6 +203199,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -15755,6 +203237,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -15793,6 +203276,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -15830,6 +203314,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -15867,6 +203352,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -15904,6 +203390,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -15941,6 +203428,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -15978,6 +203466,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -16015,6 +203504,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -16052,6 +203542,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -16089,6 +203580,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -16126,6 +203618,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -16163,6 +203656,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -16200,6 +203694,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -16237,6 +203732,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -16275,6 +203771,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -16312,6 +203809,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -16349,6 +203847,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -16386,6 +203885,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -16423,6 +203923,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -16460,6 +203961,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -16497,6 +203999,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -16534,6 +204037,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -16571,6 +204075,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -16608,6 +204113,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -16645,6 +204151,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -16682,6 +204189,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -16719,6 +204227,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -16757,6 +204266,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -16794,6 +204304,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -16831,6 +204342,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -16868,6 +204380,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -16905,6 +204418,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -16942,6 +204456,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -16979,6 +204494,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -17016,6 +204532,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -17053,6 +204570,7 @@
       "const int lane2"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -17087,6 +204605,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17112,6 +204631,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -17135,6 +204655,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -17160,6 +204681,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -17184,6 +204706,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -17209,6 +204732,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -17234,6 +204758,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -17259,6 +204784,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -17284,6 +204810,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -17309,6 +204836,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -17334,6 +204862,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -17359,6 +204888,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -17384,6 +204914,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -17409,6 +204940,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17432,6 +204964,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17457,6 +204990,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17482,6 +205016,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -17505,6 +205040,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -17528,6 +205064,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -17552,6 +205089,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -17578,6 +205116,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -17602,6 +205141,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17632,6 +205172,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -17662,6 +205203,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -17690,6 +205232,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -17718,6 +205261,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -17748,6 +205292,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -17776,6 +205321,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -17806,6 +205352,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -17833,6 +205380,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -17858,6 +205406,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -17881,6 +205430,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -17906,6 +205456,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -17929,6 +205480,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -17953,6 +205505,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -17976,6 +205529,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -18000,6 +205554,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -18023,6 +205578,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -18046,6 +205602,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -18069,6 +205626,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -18093,6 +205651,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -18116,6 +205675,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -18140,6 +205700,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -18163,6 +205724,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -18186,6 +205748,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -18209,6 +205772,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -18232,6 +205796,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -18256,6 +205821,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -18284,6 +205850,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -18312,6 +205879,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -18340,6 +205908,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -18367,6 +205936,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -18390,6 +205960,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -18413,6 +205984,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -18437,6 +206009,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -18460,6 +206033,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -18484,6 +206058,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -18507,6 +206082,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -18530,6 +206106,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -18553,6 +206130,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -18577,6 +206155,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -18600,6 +206179,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -18624,6 +206204,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -18647,6 +206228,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -18670,6 +206252,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -18693,6 +206276,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -18717,6 +206301,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -18740,6 +206325,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -18764,6 +206350,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -18787,6 +206374,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -18810,6 +206398,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -18833,6 +206422,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -18857,6 +206447,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -18880,6 +206471,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -18904,6 +206496,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -18927,6 +206520,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -18950,6 +206544,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -18973,6 +206568,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -18997,6 +206593,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -19020,6 +206617,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -19044,6 +206642,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -19067,6 +206666,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -19090,6 +206690,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -19113,6 +206714,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -19137,6 +206739,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -19160,6 +206763,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -19184,6 +206788,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -19207,6 +206812,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -19230,6 +206836,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -19253,6 +206860,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -19278,6 +206886,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -19303,6 +206912,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -19326,6 +206936,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -19350,6 +206961,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -19380,6 +206992,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -19410,6 +207023,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -19438,6 +207052,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -19466,6 +207081,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -19496,6 +207112,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -19524,6 +207141,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -19554,6 +207172,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -19581,6 +207200,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -19606,6 +207226,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -19629,6 +207250,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -19654,6 +207276,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -19677,6 +207300,7 @@
       "int32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -19700,6 +207324,7 @@
       "uint32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -19724,6 +207349,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -19752,6 +207378,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -19780,6 +207407,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -19808,6 +207436,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -19835,6 +207464,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -19858,6 +207488,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -19881,6 +207512,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -19905,6 +207537,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -19931,6 +207564,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -19955,6 +207589,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -19982,6 +207617,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -20009,6 +207645,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -20036,6 +207673,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -20065,6 +207703,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -20102,6 +207741,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -20139,6 +207779,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -20175,6 +207816,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -20210,6 +207852,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -20242,6 +207885,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -20275,6 +207919,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -20312,6 +207957,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -20349,6 +207995,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -20385,6 +208032,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -20420,6 +208068,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -20452,6 +208101,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -20483,6 +208133,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -20513,6 +208164,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -20541,6 +208193,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -20571,6 +208224,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -20600,6 +208254,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -20630,6 +208285,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -20660,6 +208316,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -20690,6 +208347,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -20720,6 +208378,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -20750,6 +208409,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -20780,6 +208440,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -20810,6 +208471,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -20840,6 +208502,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -20870,6 +208533,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -20898,6 +208562,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -20926,6 +208591,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -20954,6 +208620,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -20982,6 +208649,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -21010,6 +208678,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -21038,6 +208707,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -21066,6 +208736,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -21094,6 +208765,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -21122,6 +208794,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -21150,6 +208823,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -21178,6 +208852,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -21206,6 +208881,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -21233,6 +208909,7 @@
       "float32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -21258,6 +208935,7 @@
       "float64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -21281,6 +208959,7 @@
       "poly16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -21306,6 +208985,7 @@
       "poly64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -21330,6 +209010,7 @@
       "poly8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -21355,6 +209036,7 @@
       "int16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -21380,6 +209062,7 @@
       "int32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -21405,6 +209088,7 @@
       "int64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -21430,6 +209114,7 @@
       "int8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -21455,6 +209140,7 @@
       "uint16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -21480,6 +209166,7 @@
       "uint32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -21505,6 +209192,7 @@
       "uint64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -21530,6 +209218,7 @@
       "uint8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -21556,6 +209245,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8_t"
     },
     "Arguments_Preparation": {
@@ -21584,6 +209274,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -21612,6 +209303,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -21640,6 +209332,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8_t"
     },
     "Arguments_Preparation": {
@@ -21668,6 +209361,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -21696,6 +209390,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -21724,6 +209419,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -21752,6 +209448,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -21780,6 +209477,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -21808,6 +209506,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -21836,6 +209535,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -21864,6 +209564,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -21892,6 +209593,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16_t"
     },
     "Arguments_Preparation": {
@@ -21920,6 +209622,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -21948,6 +209651,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -21976,6 +209680,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16_t"
     },
     "Arguments_Preparation": {
@@ -22004,6 +209709,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -22032,6 +209738,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -22060,6 +209767,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -22090,6 +209798,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -22118,6 +209827,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -22148,6 +209858,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -22177,6 +209888,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -22207,6 +209919,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -22237,6 +209950,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -22267,6 +209981,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -22297,6 +210012,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -22327,6 +210043,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -22357,6 +210074,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -22387,6 +210105,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -22417,6 +210136,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -22447,6 +210167,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -22475,6 +210196,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -22503,6 +210225,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -22531,6 +210254,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -22559,6 +210283,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -22587,6 +210312,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -22615,6 +210341,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -22643,6 +210370,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -22671,6 +210399,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -22699,6 +210428,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -22727,6 +210457,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -22755,6 +210486,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -22783,6 +210515,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -22810,6 +210543,7 @@
       "float32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -22835,6 +210569,7 @@
       "float64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -22858,6 +210593,7 @@
       "poly16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -22883,6 +210619,7 @@
       "poly64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -22907,6 +210644,7 @@
       "poly8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -22932,6 +210670,7 @@
       "int16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -22957,6 +210696,7 @@
       "int32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -22982,6 +210722,7 @@
       "int64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -23007,6 +210748,7 @@
       "int8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -23032,6 +210774,7 @@
       "uint16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -23057,6 +210800,7 @@
       "uint32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -23082,6 +210826,7 @@
       "uint64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -23107,6 +210852,7 @@
       "uint8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -23133,6 +210879,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -23161,6 +210908,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -23189,6 +210937,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -23217,6 +210966,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -23245,6 +210995,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -23273,6 +211024,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -23302,6 +211054,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -23329,6 +211082,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -23356,6 +211110,7 @@
       "int64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -23383,6 +211138,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -23410,6 +211166,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -23437,6 +211194,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -23464,6 +211222,7 @@
       "uint64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -23491,6 +211250,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -23517,6 +211277,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -23546,6 +211307,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -23575,6 +211337,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -23604,6 +211367,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -23633,6 +211397,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -23662,6 +211427,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -23691,6 +211457,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -23720,6 +211487,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -23749,6 +211517,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -23778,6 +211547,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -23807,6 +211577,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -23836,6 +211607,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -23865,6 +211637,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -23894,6 +211667,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -23923,6 +211697,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -23952,6 +211727,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -23982,6 +211758,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -24016,6 +211793,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -24048,6 +211826,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -24082,6 +211861,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -24115,6 +211895,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -24149,6 +211930,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -24183,6 +211965,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -24217,6 +212000,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -24251,6 +212035,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -24285,6 +212070,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -24319,6 +212105,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -24353,6 +212140,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -24387,6 +212175,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -24421,6 +212210,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -24455,6 +212245,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -24487,6 +212278,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -24521,6 +212313,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -24554,6 +212347,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -24588,6 +212382,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -24622,6 +212417,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -24656,6 +212452,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -24690,6 +212487,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -24724,6 +212522,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -24758,6 +212557,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -24792,6 +212592,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -24826,6 +212627,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -24860,6 +212662,7 @@
       "float32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -24893,6 +212696,7 @@
       "float64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -24925,6 +212729,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -24961,6 +212766,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -24997,6 +212803,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25033,6 +212840,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25068,6 +212876,7 @@
       "float32_t n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25101,6 +212910,7 @@
       "float64_t n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25133,6 +212943,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -25169,6 +212980,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -25204,6 +213016,7 @@
       "float32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25237,6 +213050,7 @@
       "float64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -25269,6 +213083,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25305,6 +213120,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -25341,6 +213157,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25377,6 +213194,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -25412,6 +213230,7 @@
       "float32_t n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25445,6 +213264,7 @@
       "float64_t n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -25477,6 +213297,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -25513,6 +213334,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -25548,6 +213370,7 @@
       "float32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25581,6 +213404,7 @@
       "float64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25613,6 +213437,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25649,6 +213474,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25685,6 +213511,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25721,6 +213548,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25756,6 +213584,7 @@
       "float32_t n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -25787,6 +213616,7 @@
       "float64_t n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -25819,6 +213649,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -25855,6 +213686,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -25890,6 +213722,7 @@
       "float32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25923,6 +213756,7 @@
       "float64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -25955,6 +213789,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -25991,6 +213826,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -26027,6 +213863,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -26063,6 +213900,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -26098,6 +213936,7 @@
       "float32_t n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -26129,6 +213968,7 @@
       "float64_t n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -26161,6 +214001,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -26197,6 +214038,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -26230,6 +214072,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -26255,6 +214098,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -26278,6 +214122,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -26303,6 +214148,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -26327,6 +214173,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -26352,6 +214199,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -26377,6 +214225,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -26402,6 +214251,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -26427,6 +214277,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -26452,6 +214303,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -26477,6 +214329,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -26502,6 +214355,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -26527,6 +214381,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -26553,6 +214408,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -26583,6 +214439,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -26611,6 +214468,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16_t"
     },
     "Arguments_Preparation": {
@@ -26641,6 +214499,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64_t"
     },
     "Arguments_Preparation": {
@@ -26670,6 +214529,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8_t"
     },
     "Arguments_Preparation": {
@@ -26700,6 +214560,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -26730,6 +214591,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -26760,6 +214622,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -26790,6 +214653,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -26820,6 +214684,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -26850,6 +214715,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -26880,6 +214746,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -26910,6 +214777,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -26939,6 +214807,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -26964,6 +214833,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -26987,6 +214857,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -27012,6 +214883,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -27036,6 +214908,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -27061,6 +214934,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -27086,6 +214960,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -27111,6 +214986,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -27136,6 +215012,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -27161,6 +215038,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -27186,6 +215064,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -27211,6 +215090,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -27236,6 +215116,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -27262,6 +215143,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -27292,6 +215174,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -27320,6 +215203,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16_t"
     },
     "Arguments_Preparation": {
@@ -27350,6 +215234,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64_t"
     },
     "Arguments_Preparation": {
@@ -27379,6 +215264,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8_t"
     },
     "Arguments_Preparation": {
@@ -27409,6 +215295,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -27439,6 +215326,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -27469,6 +215357,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -27499,6 +215388,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -27529,6 +215419,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -27559,6 +215450,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -27589,6 +215481,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -27619,6 +215512,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -27649,6 +215543,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -27678,6 +215573,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -27707,6 +215603,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -27736,6 +215633,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -27765,6 +215663,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -27794,6 +215693,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -27823,6 +215723,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -27852,6 +215753,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -27881,6 +215783,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -27910,6 +215813,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -27939,6 +215843,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -27968,6 +215873,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -27997,6 +215903,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -28026,6 +215933,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -28055,6 +215963,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -28084,6 +215993,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -28113,6 +216023,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -28142,6 +216053,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -28171,6 +216083,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -28200,6 +216113,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -28229,6 +216143,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -28258,6 +216173,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -28287,6 +216203,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -28316,6 +216233,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -28344,6 +216262,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -28369,6 +216288,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -28392,6 +216312,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -28417,6 +216338,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -28441,6 +216363,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -28466,6 +216389,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -28491,6 +216415,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -28516,6 +216441,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -28541,6 +216467,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -28566,6 +216493,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -28591,6 +216519,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -28616,6 +216545,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -28641,6 +216571,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -28666,6 +216597,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -28691,6 +216623,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -28716,6 +216649,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -28741,6 +216675,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -28766,6 +216701,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -28789,6 +216725,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -28812,6 +216749,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -28835,6 +216773,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -28860,6 +216799,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -28894,6 +216834,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -28926,6 +216867,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -28960,6 +216902,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -28993,6 +216936,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -29027,6 +216971,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -29061,6 +217006,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -29095,6 +217041,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -29129,6 +217076,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -29163,6 +217111,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -29197,6 +217146,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -29231,6 +217181,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -29265,6 +217216,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -29297,6 +217249,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -29322,6 +217275,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -29347,6 +217301,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -29372,6 +217327,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -29397,6 +217353,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -29421,6 +217378,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -29445,6 +217403,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -29469,6 +217428,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -29493,6 +217453,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -29518,6 +217479,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -29543,6 +217505,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -29568,6 +217531,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -29593,6 +217557,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -29618,6 +217583,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -29643,6 +217609,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -29668,6 +217635,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -29693,6 +217661,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -29718,6 +217687,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -29743,6 +217713,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -29768,6 +217739,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -29793,6 +217765,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -29818,6 +217791,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -29843,6 +217817,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -29868,6 +217843,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -29893,6 +217869,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -29918,6 +217895,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -29943,6 +217921,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -29968,6 +217947,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -29993,6 +217973,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -30018,6 +217999,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -30043,6 +218025,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -30068,6 +218051,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -30093,6 +218077,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -30118,6 +218103,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -30143,6 +218129,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -30168,6 +218155,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -30193,6 +218181,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -30218,6 +218207,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -30243,6 +218233,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -30268,6 +218259,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -30293,6 +218285,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -30318,6 +218311,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -30343,6 +218337,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -30368,6 +218363,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -30393,6 +218389,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -30418,6 +218415,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -30441,6 +218439,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -30466,6 +218465,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -30490,6 +218490,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -30515,6 +218516,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -30540,6 +218542,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -30565,6 +218568,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -30590,6 +218594,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -30615,6 +218620,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -30640,6 +218646,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -30665,6 +218672,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -30690,6 +218698,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -30715,6 +218724,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -30740,6 +218750,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -30765,6 +218776,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -30790,6 +218802,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -30815,6 +218828,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -30838,6 +218852,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -30861,6 +218876,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -30884,6 +218900,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -30909,6 +218926,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -30943,6 +218961,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -30975,6 +218994,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -31009,6 +219029,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -31042,6 +219063,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -31076,6 +219098,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -31110,6 +219133,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -31144,6 +219168,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -31178,6 +219203,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -31212,6 +219238,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -31246,6 +219273,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -31280,6 +219308,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -31314,6 +219343,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -31346,6 +219376,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -31371,6 +219402,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -31396,6 +219428,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -31421,6 +219454,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -31446,6 +219480,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -31470,6 +219505,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -31494,6 +219530,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -31518,6 +219555,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -31542,6 +219580,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -31567,6 +219606,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -31592,6 +219632,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -31617,6 +219658,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -31642,6 +219684,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -31667,6 +219710,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -31692,6 +219736,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -31717,6 +219762,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -31742,6 +219788,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -31767,6 +219814,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -31792,6 +219840,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -31817,6 +219866,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -31842,6 +219892,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -31867,6 +219918,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -31892,6 +219944,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -31917,6 +219970,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -31942,6 +219996,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -31967,6 +220022,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -31992,6 +220048,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -32017,6 +220074,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -32042,6 +220100,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -32067,6 +220126,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -32092,6 +220152,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -32117,6 +220178,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -32142,6 +220204,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -32167,6 +220230,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -32192,6 +220256,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -32217,6 +220282,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -32242,6 +220308,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -32267,6 +220334,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32292,6 +220360,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -32317,6 +220386,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -32342,6 +220412,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -32367,6 +220438,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -32392,6 +220464,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -32417,6 +220490,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -32442,6 +220516,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32467,6 +220542,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32490,6 +220566,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -32515,6 +220592,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32539,6 +220617,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -32564,6 +220643,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -32589,6 +220669,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32614,6 +220695,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32639,6 +220721,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -32664,6 +220747,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -32689,6 +220773,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32714,6 +220799,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32739,6 +220825,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -32764,6 +220851,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32789,6 +220877,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32814,6 +220903,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -32848,6 +220938,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32880,6 +220971,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -32914,6 +221006,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -32946,6 +221039,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -32980,6 +221074,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33014,6 +221109,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33048,6 +221144,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -33080,6 +221177,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33114,6 +221212,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33148,6 +221247,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33182,6 +221282,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -33214,6 +221315,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33246,6 +221348,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33271,6 +221374,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -33295,6 +221399,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33320,6 +221425,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33345,6 +221451,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33370,6 +221477,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -33395,6 +221503,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33420,6 +221529,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33445,6 +221555,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33470,6 +221581,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x2_t"
     },
     "Arguments_Preparation": {
@@ -33495,6 +221607,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33520,6 +221633,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33545,6 +221659,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33568,6 +221683,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33593,6 +221709,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33616,6 +221733,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -33641,6 +221759,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33666,6 +221785,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33691,6 +221811,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33714,6 +221835,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -33739,6 +221861,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33764,6 +221887,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33789,6 +221913,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33812,6 +221937,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -33837,6 +221963,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33862,6 +221989,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33887,6 +222015,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -33921,6 +222050,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -33953,6 +222083,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -33987,6 +222118,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34019,6 +222151,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34051,6 +222184,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -34085,6 +222219,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -34119,6 +222254,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34151,6 +222287,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34183,6 +222320,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -34217,6 +222355,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -34251,6 +222390,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34283,6 +222423,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34313,6 +222454,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -34338,6 +222480,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34361,6 +222504,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34386,6 +222530,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -34411,6 +222556,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -34436,6 +222582,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34459,6 +222606,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34484,6 +222632,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -34509,6 +222658,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -34534,6 +222684,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x2_t"
     },
     "Arguments_Preparation": {
@@ -34557,6 +222708,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -34582,6 +222734,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -34607,6 +222760,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -34630,6 +222784,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -34655,6 +222810,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -34679,6 +222835,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -34704,6 +222861,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -34729,6 +222887,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -34754,6 +222913,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -34779,6 +222939,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -34804,6 +222965,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -34829,6 +222991,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -34854,6 +223017,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -34879,6 +223043,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -34904,6 +223069,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -34929,6 +223095,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -34954,6 +223121,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -34988,6 +223156,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35020,6 +223189,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35054,6 +223224,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35086,6 +223257,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35120,6 +223292,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35154,6 +223327,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35188,6 +223362,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35220,6 +223395,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35254,6 +223430,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35288,6 +223465,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35322,6 +223500,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35354,6 +223533,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35386,6 +223566,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35411,6 +223592,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35435,6 +223617,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35460,6 +223643,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35485,6 +223669,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35510,6 +223695,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35535,6 +223721,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35560,6 +223747,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35585,6 +223773,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35610,6 +223799,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x3_t"
     },
     "Arguments_Preparation": {
@@ -35635,6 +223825,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35660,6 +223851,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35685,6 +223877,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35708,6 +223901,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35733,6 +223927,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35756,6 +223951,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -35781,6 +223977,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35806,6 +224003,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35831,6 +224029,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35854,6 +224053,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -35879,6 +224079,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -35904,6 +224105,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -35929,6 +224131,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -35952,6 +224155,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -35977,6 +224181,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36002,6 +224207,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36027,6 +224233,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36061,6 +224268,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36093,6 +224301,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36127,6 +224336,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36159,6 +224369,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36191,6 +224402,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36225,6 +224437,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36259,6 +224472,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36291,6 +224505,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36323,6 +224538,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36357,6 +224573,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36391,6 +224608,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36423,6 +224641,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36453,6 +224672,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36478,6 +224698,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36501,6 +224722,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36526,6 +224748,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36551,6 +224774,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36576,6 +224800,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36599,6 +224824,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36624,6 +224850,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x3_t"
     },
     "Arguments_Preparation": {
@@ -36649,6 +224876,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x3_t"
     },
     "Arguments_Preparation": {
@@ -36674,6 +224902,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x3_t"
     },
     "Arguments_Preparation": {
@@ -36697,6 +224926,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x3_t"
     },
     "Arguments_Preparation": {
@@ -36722,6 +224952,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -36747,6 +224978,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -36770,6 +225002,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -36795,6 +225028,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -36819,6 +225053,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -36844,6 +225079,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -36869,6 +225105,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -36894,6 +225131,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -36919,6 +225157,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -36944,6 +225183,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -36969,6 +225209,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -36994,6 +225235,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37019,6 +225261,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37044,6 +225287,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37069,6 +225313,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37094,6 +225339,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37128,6 +225374,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37160,6 +225407,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37194,6 +225442,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37226,6 +225475,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37260,6 +225510,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37294,6 +225545,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37328,6 +225580,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37360,6 +225613,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37394,6 +225648,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37428,6 +225683,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37462,6 +225718,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37494,6 +225751,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37526,6 +225784,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37551,6 +225810,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37575,6 +225835,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37600,6 +225861,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37625,6 +225887,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37650,6 +225913,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37675,6 +225939,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37700,6 +225965,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37725,6 +225991,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37750,6 +226017,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1x4_t"
     },
     "Arguments_Preparation": {
@@ -37775,6 +226043,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37800,6 +226069,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37825,6 +226095,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37848,6 +226119,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37873,6 +226145,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37896,6 +226169,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -37921,6 +226195,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -37946,6 +226221,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -37971,6 +226247,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -37994,6 +226271,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38019,6 +226297,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38044,6 +226323,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38069,6 +226349,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38092,6 +226373,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38117,6 +226399,7 @@
       "float32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38142,6 +226425,7 @@
       "float64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38167,6 +226451,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38201,6 +226486,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38233,6 +226519,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38267,6 +226554,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38299,6 +226587,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38331,6 +226620,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38365,6 +226655,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38399,6 +226690,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38431,6 +226723,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38463,6 +226756,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38497,6 +226791,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38531,6 +226826,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38563,6 +226859,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38593,6 +226890,7 @@
       "poly16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38618,6 +226916,7 @@
       "poly64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38641,6 +226940,7 @@
       "poly8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38666,6 +226966,7 @@
       "int16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38691,6 +226992,7 @@
       "int32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38716,6 +227018,7 @@
       "int64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38739,6 +227042,7 @@
       "int8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38764,6 +227068,7 @@
       "uint16_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x4_t"
     },
     "Arguments_Preparation": {
@@ -38789,6 +227094,7 @@
       "uint32_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x4_t"
     },
     "Arguments_Preparation": {
@@ -38814,6 +227120,7 @@
       "uint64_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2x4_t"
     },
     "Arguments_Preparation": {
@@ -38837,6 +227144,7 @@
       "uint8_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x4_t"
     },
     "Arguments_Preparation": {
@@ -38862,6 +227170,7 @@
       "poly128_t const * ptr"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -38887,6 +227196,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -38916,6 +227226,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -38943,6 +227254,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -38972,6 +227284,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -39001,6 +227314,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -39030,6 +227344,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -39059,6 +227374,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -39088,6 +227404,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -39117,6 +227434,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -39145,6 +227463,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -39172,6 +227491,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -39200,6 +227520,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -39226,6 +227547,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -39249,6 +227571,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -39272,6 +227595,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -39296,6 +227620,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -39325,6 +227650,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -39352,6 +227678,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -39381,6 +227708,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -39410,6 +227738,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -39439,6 +227768,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -39468,6 +227798,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -39497,6 +227828,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -39525,6 +227857,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -39548,6 +227881,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -39571,6 +227905,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -39594,6 +227929,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -39617,6 +227953,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -39640,6 +227977,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -39663,6 +228001,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -39686,6 +228025,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -39709,6 +228049,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -39732,6 +228073,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -39755,6 +228097,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -39778,6 +228121,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -39801,6 +228145,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -39824,6 +228169,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -39847,6 +228193,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -39871,6 +228218,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -39900,6 +228248,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -39927,6 +228276,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -39956,6 +228306,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -39985,6 +228336,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -40014,6 +228366,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -40043,6 +228396,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -40072,6 +228426,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -40101,6 +228456,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -40129,6 +228485,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -40156,6 +228513,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -40184,6 +228542,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -40210,6 +228569,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -40233,6 +228593,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -40256,6 +228617,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -40280,6 +228642,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -40309,6 +228672,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -40336,6 +228700,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -40365,6 +228730,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -40394,6 +228760,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -40423,6 +228790,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -40452,6 +228820,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -40481,6 +228850,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -40509,6 +228879,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -40532,6 +228903,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -40555,6 +228927,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -40578,6 +228951,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -40601,6 +228975,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -40624,6 +228999,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -40647,6 +229023,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -40670,6 +229047,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -40693,6 +229071,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -40716,6 +229095,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -40739,6 +229119,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -40762,6 +229143,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -40785,6 +229167,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -40808,6 +229191,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -40831,6 +229215,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -40856,6 +229241,7 @@
       "float32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -40889,6 +229275,7 @@
       "float64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -40921,6 +229308,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -40953,6 +229341,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -40991,6 +229380,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -41029,6 +229419,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -41067,6 +229458,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -41105,6 +229497,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -41135,6 +229528,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -41171,6 +229565,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -41207,6 +229602,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -41243,6 +229639,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -41278,6 +229675,7 @@
       "float32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -41311,6 +229709,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -41344,6 +229743,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -41377,6 +229777,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -41410,6 +229811,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -41443,6 +229845,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -41476,6 +229879,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -41509,6 +229913,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -41542,6 +229947,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -41575,6 +229981,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -41608,6 +230015,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -41642,6 +230050,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -41678,6 +230087,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -41714,6 +230124,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -41750,6 +230161,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -41786,6 +230198,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -41822,6 +230235,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -41858,6 +230272,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -41894,6 +230309,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -41929,6 +230345,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -41960,6 +230377,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -41991,6 +230409,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42022,6 +230441,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42053,6 +230473,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -42084,6 +230505,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -42115,6 +230537,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -42146,6 +230569,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42177,6 +230601,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42208,6 +230633,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -42240,6 +230666,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -42278,6 +230705,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -42316,6 +230744,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42354,6 +230783,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42392,6 +230822,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -42428,6 +230859,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -42464,6 +230896,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42500,6 +230933,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42535,6 +230969,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -42568,6 +231003,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -42601,6 +231037,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42634,6 +231071,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42667,6 +231105,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -42700,6 +231139,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -42733,6 +231173,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -42766,6 +231207,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -42799,6 +231241,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -42832,6 +231275,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -42865,6 +231309,7 @@
       "float32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -42898,6 +231343,7 @@
       "float64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -42930,6 +231376,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -42962,6 +231409,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -43000,6 +231448,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -43038,6 +231487,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -43076,6 +231526,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -43114,6 +231565,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -43144,6 +231596,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -43180,6 +231633,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -43216,6 +231670,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -43252,6 +231707,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -43287,6 +231743,7 @@
       "float32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -43320,6 +231777,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -43353,6 +231811,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -43386,6 +231845,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -43419,6 +231879,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -43452,6 +231913,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -43485,6 +231947,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -43518,6 +231981,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -43551,6 +232015,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -43584,6 +232049,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -43617,6 +232083,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -43650,6 +232117,7 @@
       "float32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -43683,6 +232151,7 @@
       "float64x1_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -43715,6 +232184,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -43747,6 +232217,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -43785,6 +232256,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -43823,6 +232295,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -43861,6 +232334,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -43899,6 +232373,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -43929,6 +232404,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -43965,6 +232441,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -44001,6 +232478,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -44037,6 +232515,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -44072,6 +232551,7 @@
       "float32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -44105,6 +232585,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -44138,6 +232619,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -44171,6 +232653,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -44204,6 +232687,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -44237,6 +232721,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -44270,6 +232755,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -44303,6 +232789,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -44336,6 +232823,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -44369,6 +232857,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -44402,6 +232891,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -44436,6 +232926,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -44472,6 +232963,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -44508,6 +233000,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -44544,6 +233037,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -44580,6 +233074,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -44616,6 +233111,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -44652,6 +233148,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -44688,6 +233185,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -44723,6 +233221,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -44754,6 +233253,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -44785,6 +233285,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -44816,6 +233317,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -44847,6 +233349,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -44878,6 +233381,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -44909,6 +233413,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -44940,6 +233445,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -44971,6 +233477,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -45002,6 +233509,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -45034,6 +233542,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -45072,6 +233581,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -45110,6 +233620,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -45148,6 +233659,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -45186,6 +233698,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -45222,6 +233735,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -45258,6 +233772,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -45294,6 +233809,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -45329,6 +233845,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -45362,6 +233879,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -45395,6 +233913,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -45428,6 +233947,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -45461,6 +233981,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -45494,6 +234015,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -45527,6 +234049,7 @@
       "int8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -45560,6 +234083,7 @@
       "uint16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -45593,6 +234117,7 @@
       "uint32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -45626,6 +234151,7 @@
       "uint8x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -45659,6 +234185,7 @@
       "float32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -45692,6 +234219,7 @@
       "float64x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -45724,6 +234252,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -45756,6 +234285,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -45794,6 +234324,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -45832,6 +234363,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -45870,6 +234402,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -45908,6 +234441,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -45938,6 +234472,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -45974,6 +234509,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46010,6 +234546,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -46046,6 +234583,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -46081,6 +234619,7 @@
       "float32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -46114,6 +234653,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -46147,6 +234687,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46180,6 +234721,7 @@
       "uint16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -46213,6 +234755,7 @@
       "uint32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -46246,6 +234789,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -46279,6 +234823,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46312,6 +234857,7 @@
       "int8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -46345,6 +234891,7 @@
       "uint16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -46378,6 +234925,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -46411,6 +234959,7 @@
       "uint8x16_t c"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -46444,6 +234993,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46476,6 +235026,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -46506,6 +235057,7 @@
       "float32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -46531,6 +235083,7 @@
       "float64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -46554,6 +235107,7 @@
       "poly16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -46579,6 +235133,7 @@
       "poly8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -46604,6 +235159,7 @@
       "int16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -46629,6 +235185,7 @@
       "int32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -46654,6 +235211,7 @@
       "int64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -46679,6 +235237,7 @@
       "int8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -46704,6 +235263,7 @@
       "uint16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -46729,6 +235289,7 @@
       "uint32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -46754,6 +235315,7 @@
       "uint64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -46779,6 +235341,7 @@
       "uint8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -46804,6 +235367,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46827,6 +235391,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -46850,6 +235415,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -46873,6 +235439,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -46896,6 +235463,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -46919,6 +235487,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -46942,6 +235511,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -46967,6 +235537,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -46992,6 +235563,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -47017,6 +235589,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -47042,6 +235615,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -47067,6 +235641,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -47093,6 +235668,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -47122,6 +235698,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -47151,6 +235728,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -47180,6 +235758,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -47209,6 +235788,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -47238,6 +235818,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -47266,6 +235847,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -47291,6 +235873,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -47316,6 +235899,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -47341,6 +235925,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -47366,6 +235951,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -47391,6 +235977,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -47416,6 +236003,7 @@
       "float32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -47441,6 +236029,7 @@
       "float64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -47464,6 +236053,7 @@
       "poly16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -47489,6 +236079,7 @@
       "poly8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -47514,6 +236105,7 @@
       "int16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -47539,6 +236131,7 @@
       "int32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -47564,6 +236157,7 @@
       "int64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -47589,6 +236183,7 @@
       "int8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -47614,6 +236209,7 @@
       "uint16_t value"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -47639,6 +236235,7 @@
       "uint32_t value"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -47664,6 +236261,7 @@
       "uint64_t value"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -47689,6 +236287,7 @@
       "uint8_t value"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -47715,6 +236314,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -47744,6 +236344,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -47772,6 +236373,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -47806,6 +236408,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -47838,6 +236441,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -47872,6 +236476,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -47906,6 +236511,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -47940,6 +236546,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -47974,6 +236581,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -48006,6 +236614,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -48038,6 +236647,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -48070,6 +236680,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -48102,6 +236713,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -48134,6 +236746,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -48165,6 +236778,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -48194,6 +236808,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -48221,6 +236836,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -48250,6 +236866,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -48279,6 +236896,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -48308,6 +236926,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -48337,6 +236956,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -48366,6 +236986,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -48395,6 +237016,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -48424,6 +237046,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -48453,6 +237076,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -48482,6 +237106,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -48511,6 +237136,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -48541,6 +237167,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -48573,6 +237200,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -48605,6 +237233,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -48637,6 +237266,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -48669,6 +237299,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -48701,6 +237332,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -48733,6 +237365,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -48765,6 +237398,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -48797,6 +237431,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -48829,6 +237464,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -48860,6 +237496,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -48887,6 +237524,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -48914,6 +237552,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -48941,6 +237580,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -48968,6 +237608,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -48996,6 +237637,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -49023,6 +237665,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49050,6 +237693,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -49077,6 +237721,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -49104,6 +237749,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -49131,6 +237777,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -49158,6 +237805,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -49186,6 +237834,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49220,6 +237869,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -49254,6 +237904,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -49288,6 +237939,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -49322,6 +237974,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49354,6 +238007,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -49386,6 +238040,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -49418,6 +238073,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -49449,6 +238105,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49478,6 +238135,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -49507,6 +238165,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -49536,6 +238195,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -49565,6 +238225,7 @@
       "poly64_t b"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -49593,6 +238254,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -49622,6 +238284,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49651,6 +238314,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -49680,6 +238344,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -49709,6 +238374,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -49738,6 +238404,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -49767,6 +238434,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -49796,6 +238464,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -49825,6 +238494,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -49853,6 +238523,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -49887,6 +238558,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -49919,6 +238591,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -49953,6 +238626,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -49987,6 +238661,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -50021,6 +238696,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -50055,6 +238731,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -50087,6 +238764,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -50119,6 +238797,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -50151,6 +238830,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -50183,6 +238863,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -50215,6 +238896,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -50246,6 +238928,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -50275,6 +238958,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -50302,6 +238986,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -50331,6 +239016,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -50360,6 +239046,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -50389,6 +239076,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -50418,6 +239106,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -50447,6 +239136,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -50476,6 +239166,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -50505,6 +239196,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -50534,6 +239226,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -50563,6 +239256,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -50592,6 +239286,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -50622,6 +239317,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -50654,6 +239350,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -50685,6 +239382,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -50712,6 +239410,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -50740,6 +239439,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -50772,6 +239472,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -50804,6 +239505,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -50836,6 +239538,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -50867,6 +239570,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -50895,6 +239599,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -50927,6 +239632,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -50958,6 +239664,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -50985,6 +239692,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -51013,6 +239721,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -51045,6 +239754,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -51077,6 +239787,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -51109,6 +239820,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -51140,6 +239852,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -51168,6 +239881,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -51200,6 +239914,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -51230,6 +239945,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -51255,6 +239971,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -51280,6 +239997,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -51305,6 +240023,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -51330,6 +240049,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -51355,6 +240075,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -51380,6 +240101,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -51405,6 +240127,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -51430,6 +240153,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -51455,6 +240179,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -51480,6 +240205,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -51505,6 +240231,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -51530,6 +240257,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -51555,6 +240283,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -51580,6 +240309,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -51605,6 +240335,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -51628,6 +240359,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -51653,6 +240385,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -51678,6 +240411,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -51701,6 +240435,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -51726,6 +240461,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -51749,6 +240485,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -51774,6 +240511,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -51797,6 +240535,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -51822,6 +240561,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -51847,6 +240587,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -51870,6 +240611,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -51896,6 +240638,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -51925,6 +240668,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -51954,6 +240698,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -51983,6 +240728,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -52012,6 +240758,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -52041,6 +240788,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -52070,6 +240818,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -52099,6 +240848,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -52128,6 +240878,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -52157,6 +240908,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -52186,6 +240938,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -52215,6 +240968,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -52244,6 +240998,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -52273,6 +241028,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -52302,6 +241058,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -52331,6 +241088,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -52360,6 +241118,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -52389,6 +241148,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -52418,6 +241178,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -52447,6 +241208,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -52476,6 +241238,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -52505,6 +241268,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -52534,6 +241298,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -52563,6 +241328,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -52592,6 +241358,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -52621,6 +241388,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -52650,6 +241418,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -52679,6 +241448,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -52708,6 +241478,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -52737,6 +241508,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -52766,6 +241538,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -52795,6 +241568,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -52824,6 +241598,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -52853,6 +241628,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -52882,6 +241658,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -52911,6 +241688,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -52940,6 +241718,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -52969,6 +241748,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -52998,6 +241778,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -53027,6 +241808,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -53056,6 +241838,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -53085,6 +241868,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -53114,6 +241898,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -53143,6 +241928,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -53172,6 +241958,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -53201,6 +241988,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -53230,6 +242018,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -53259,6 +242048,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -53288,6 +242078,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -53317,6 +242108,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -53346,6 +242138,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -53374,6 +242167,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -53397,6 +242191,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -53420,6 +242215,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -53443,6 +242239,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -53468,6 +242265,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -53493,6 +242291,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -53518,6 +242317,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -53543,6 +242343,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -53568,6 +242369,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -53593,6 +242395,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -53618,6 +242421,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -53643,6 +242447,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -53668,6 +242473,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -53693,6 +242499,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -53718,6 +242525,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -53744,6 +242552,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -53771,6 +242580,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -53798,6 +242608,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -53825,6 +242636,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -53852,6 +242664,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -53879,6 +242692,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -53906,6 +242720,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -53933,6 +242748,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -53960,6 +242776,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -53987,6 +242804,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -54013,6 +242831,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -54037,6 +242856,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -54066,6 +242886,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -54095,6 +242916,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -54124,6 +242946,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -54153,6 +242976,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -54182,6 +243006,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -54211,6 +243036,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -54240,6 +243066,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -54267,6 +243094,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -54294,6 +243122,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -54320,6 +243149,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -54343,6 +243173,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -54367,6 +243198,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -54394,6 +243226,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -54421,6 +243254,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -54448,6 +243282,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -54475,6 +243310,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -54502,6 +243338,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -54529,6 +243366,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -54556,6 +243394,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -54582,6 +243421,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -54605,6 +243445,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -54629,6 +243470,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -54658,6 +243500,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -54687,6 +243530,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -54716,6 +243560,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -54745,6 +243590,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -54774,6 +243620,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -54803,6 +243650,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -54832,6 +243680,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -54859,6 +243708,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -54886,6 +243736,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -54912,6 +243763,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -54935,6 +243787,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -54959,6 +243812,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -54986,6 +243840,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -55013,6 +243868,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -55040,6 +243896,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -55067,6 +243924,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -55094,6 +243952,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -55121,6 +243980,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -55148,6 +244008,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -55174,6 +244035,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -55197,6 +244059,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -55220,6 +244083,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -55245,6 +244109,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -55270,6 +244135,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -55293,6 +244159,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -55318,6 +244185,7 @@
       "int8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -55341,6 +244209,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -55364,6 +244233,7 @@
       "int16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -55387,6 +244257,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -55412,6 +244283,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -55437,6 +244309,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -55460,6 +244333,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -55485,6 +244359,7 @@
       "int32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -55509,6 +244384,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -55538,6 +244414,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -55567,6 +244444,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -55596,6 +244474,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -55625,6 +244504,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -55654,6 +244534,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -55683,6 +244564,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -55712,6 +244594,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -55741,6 +244624,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -55768,6 +244652,7 @@
       "uint8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -55795,6 +244680,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -55822,6 +244708,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -55849,6 +244736,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -55876,6 +244764,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -55903,6 +244792,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -55932,6 +244822,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -55961,6 +244852,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -55990,6 +244882,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -56019,6 +244912,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -56048,6 +244942,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -56077,6 +244972,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -56106,6 +245002,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -56135,6 +245032,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -56162,6 +245060,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -56191,6 +245090,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56227,6 +245127,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56263,6 +245164,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56299,6 +245201,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56334,6 +245237,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56365,6 +245269,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56396,6 +245301,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56427,6 +245333,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56459,6 +245366,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56497,6 +245405,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56535,6 +245444,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56571,6 +245481,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56606,6 +245517,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56639,6 +245551,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56672,6 +245585,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56705,6 +245619,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -56739,6 +245654,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -56775,6 +245691,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -56810,6 +245727,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -56842,6 +245760,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -56878,6 +245797,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -56913,6 +245833,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -56945,6 +245866,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -56981,6 +245903,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57017,6 +245940,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57053,6 +245977,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57088,6 +246013,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57119,6 +246045,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57150,6 +246077,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57181,6 +246109,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57213,6 +246142,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57251,6 +246181,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57289,6 +246220,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57325,6 +246257,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57360,6 +246293,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57393,6 +246327,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57426,6 +246361,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -57459,6 +246395,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -57493,6 +246430,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -57529,6 +246467,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -57564,6 +246503,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -57596,6 +246536,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -57632,6 +246573,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -57667,6 +246609,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -57698,6 +246641,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -57732,6 +246676,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -57766,6 +246711,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -57798,6 +246744,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -57829,6 +246776,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -57858,6 +246806,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -57887,6 +246836,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -57916,6 +246866,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -57946,6 +246897,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -57978,6 +246930,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -58009,6 +246962,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -58037,6 +246991,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -58071,6 +247026,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58105,6 +247061,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -58137,6 +247094,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58168,6 +247126,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -58197,6 +247156,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58226,6 +247186,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -58255,6 +247216,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58285,6 +247247,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58317,6 +247280,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58348,6 +247312,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58376,6 +247341,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58408,6 +247374,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58440,6 +247407,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58472,6 +247440,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58503,6 +247472,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58530,6 +247500,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58557,6 +247528,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58584,6 +247556,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58612,6 +247585,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58646,6 +247620,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58680,6 +247655,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58712,6 +247688,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58743,6 +247720,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58772,6 +247750,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58801,6 +247780,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -58830,6 +247810,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -58860,6 +247841,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58892,6 +247874,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58923,6 +247906,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -58951,6 +247935,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -58983,6 +247968,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -59014,6 +248000,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -59041,6 +248028,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -59068,6 +248056,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -59095,6 +248084,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -59122,6 +248112,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -59149,6 +248140,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -59176,6 +248168,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -59202,6 +248195,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -59227,6 +248221,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -59252,6 +248247,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -59277,6 +248273,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -59302,6 +248299,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -59327,6 +248325,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -59352,6 +248351,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -59375,6 +248375,7 @@
       "uint64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -59398,6 +248399,7 @@
       "int16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -59421,6 +248423,7 @@
       "uint16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -59444,6 +248447,7 @@
       "int32_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -59467,6 +248471,7 @@
       "uint32_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -59491,6 +248496,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -59518,6 +248524,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -59545,6 +248552,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -59571,6 +248579,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -59596,6 +248605,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -59621,6 +248631,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -59646,6 +248657,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -59669,6 +248681,7 @@
       "int16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -59692,6 +248705,7 @@
       "int32_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -59715,6 +248729,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -59740,6 +248755,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -59765,6 +248781,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -59788,6 +248805,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -59813,6 +248831,7 @@
       "int8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -59836,6 +248855,7 @@
       "int64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -59859,6 +248879,7 @@
       "int16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -59882,6 +248903,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -59907,6 +248929,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -59932,6 +248955,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -59955,6 +248979,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -59980,6 +249005,7 @@
       "int32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -60006,6 +249032,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60042,6 +249069,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60078,6 +249106,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60114,6 +249143,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60149,6 +249179,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60180,6 +249211,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60212,6 +249244,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60248,6 +249281,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60283,6 +249317,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60315,6 +249350,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -60351,6 +249387,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -60387,6 +249424,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -60423,6 +249461,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -60458,6 +249497,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -60489,6 +249529,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -60521,6 +249562,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -60557,6 +249599,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -60592,6 +249635,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -60624,6 +249668,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60660,6 +249705,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60696,6 +249742,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60732,6 +249779,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60767,6 +249815,7 @@
       "int16x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -60798,6 +249847,7 @@
       "int32x2_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -60830,6 +249880,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60866,6 +249917,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60901,6 +249953,7 @@
       "int16_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -60933,6 +249986,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -60969,6 +250023,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61005,6 +250060,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61041,6 +250097,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61076,6 +250133,7 @@
       "int16x8_t c"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61107,6 +250165,7 @@
       "int32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61139,6 +250198,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61175,6 +250235,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61210,6 +250271,7 @@
       "int32_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61241,6 +250303,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -61275,6 +250338,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -61309,6 +250373,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -61341,6 +250406,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -61372,6 +250438,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -61401,6 +250468,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -61430,6 +250498,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -61459,6 +250528,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -61489,6 +250559,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -61521,6 +250592,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -61552,6 +250624,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -61580,6 +250653,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61614,6 +250688,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61648,6 +250723,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61680,6 +250756,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61711,6 +250788,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61740,6 +250818,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61769,6 +250848,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -61798,6 +250878,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -61828,6 +250909,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61860,6 +250942,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61891,6 +250974,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -61918,6 +251002,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -61947,6 +251032,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -61976,6 +251062,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -62005,6 +251092,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -62034,6 +251122,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -62063,6 +251152,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -62092,6 +251182,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -62121,6 +251212,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -62150,6 +251242,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -62177,6 +251270,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -62204,6 +251298,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -62231,6 +251326,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -62258,6 +251354,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -62285,6 +251382,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -62312,6 +251410,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -62341,6 +251440,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -62370,6 +251470,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -62399,6 +251500,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -62428,6 +251530,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -62457,6 +251560,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -62486,6 +251590,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -62515,6 +251620,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -62544,6 +251650,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -62571,6 +251678,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -62599,6 +251707,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -62631,6 +251740,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -62663,6 +251773,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -62695,6 +251806,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -62727,6 +251839,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -62759,6 +251872,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -62790,6 +251904,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -62820,6 +251935,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -62850,6 +251966,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -62880,6 +251997,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -62910,6 +252028,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -62940,6 +252059,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -62970,6 +252090,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -62998,6 +252119,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -63026,6 +252148,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -63054,6 +252177,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -63082,6 +252206,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -63110,6 +252235,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -63139,6 +252265,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -63171,6 +252298,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -63203,6 +252331,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -63234,6 +252363,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -63264,6 +252394,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -63294,6 +252425,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -63324,6 +252456,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -63352,6 +252485,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -63380,6 +252514,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -63408,6 +252543,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -63438,6 +252574,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -63468,6 +252605,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -63498,6 +252636,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -63528,6 +252667,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -63558,6 +252698,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -63588,6 +252729,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -63618,6 +252760,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -63648,6 +252791,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -63677,6 +252821,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -63706,6 +252851,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -63735,6 +252881,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -63764,6 +252911,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -63793,6 +252941,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -63822,6 +252971,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -63851,6 +253001,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -63880,6 +253031,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -63908,6 +253060,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -63936,6 +253089,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -63963,6 +253117,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -63990,6 +253145,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -64018,6 +253174,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -64046,6 +253203,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -64073,6 +253231,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -64100,6 +253259,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -64128,6 +253288,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -64156,6 +253317,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -64183,6 +253345,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -64210,6 +253373,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -64240,6 +253404,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -64270,6 +253435,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -64300,6 +253466,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -64330,6 +253497,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -64360,6 +253528,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -64390,6 +253559,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -64420,6 +253590,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -64450,6 +253621,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -64479,6 +253651,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -64508,6 +253681,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -64537,6 +253711,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -64566,6 +253741,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -64595,6 +253771,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -64624,6 +253801,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -64653,6 +253831,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -64682,6 +253861,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -64710,6 +253890,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -64738,6 +253919,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -64765,6 +253947,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -64792,6 +253975,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -64822,6 +254006,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -64852,6 +254037,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -64882,6 +254068,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -64912,6 +254099,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -64940,6 +254128,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -64968,6 +254157,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -64996,6 +254186,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -65026,6 +254217,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -65056,6 +254248,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -65086,6 +254279,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -65116,6 +254310,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -65145,6 +254340,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -65177,6 +254373,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -65209,6 +254406,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -65241,6 +254439,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -65273,6 +254472,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -65305,6 +254505,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -65336,6 +254537,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -65366,6 +254568,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -65396,6 +254599,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -65426,6 +254630,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -65456,6 +254661,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -65486,6 +254692,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -65516,6 +254723,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -65544,6 +254752,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -65572,6 +254781,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -65600,6 +254810,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -65628,6 +254839,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -65656,6 +254868,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -65685,6 +254898,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -65717,6 +254931,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -65749,6 +254964,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -65780,6 +254996,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -65810,6 +255027,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -65840,6 +255058,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -65870,6 +255089,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -65898,6 +255118,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -65926,6 +255147,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -65954,6 +255176,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -65983,6 +255206,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -66012,6 +255236,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -66041,6 +255266,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -66070,6 +255296,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -66099,6 +255326,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -66128,6 +255356,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -66157,6 +255386,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -66186,6 +255416,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -66213,6 +255444,7 @@
       "uint8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -66240,6 +255472,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -66267,6 +255500,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -66294,6 +255528,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -66321,6 +255556,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -66348,6 +255584,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -66377,6 +255614,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -66406,6 +255644,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -66435,6 +255674,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -66464,6 +255704,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -66493,6 +255734,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -66522,6 +255764,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -66551,6 +255794,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -66580,6 +255824,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -66607,6 +255852,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -66634,6 +255880,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -66661,6 +255908,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -66688,6 +255936,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -66715,6 +255964,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -66742,6 +255992,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -66769,6 +256020,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -66796,6 +256048,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -66823,6 +256076,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -66850,6 +256104,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -66877,6 +256132,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -66904,6 +256160,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -66931,6 +256188,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -66958,6 +256216,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -66985,6 +256244,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67012,6 +256272,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67039,6 +256300,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67066,6 +256328,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67093,6 +256356,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -67120,6 +256384,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -67147,6 +256412,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67174,6 +256440,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67201,6 +256468,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67228,6 +256496,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67255,6 +256524,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -67283,6 +256553,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -67314,6 +256585,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67345,6 +256617,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67376,6 +256649,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67407,6 +256681,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67438,6 +256713,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -67469,6 +256745,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -67500,6 +256777,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67531,6 +256809,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67562,6 +256841,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67593,6 +256873,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67624,6 +256905,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -67655,6 +256937,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -67686,6 +256969,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67717,6 +257001,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67748,6 +257033,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67779,6 +257065,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67810,6 +257097,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -67841,6 +257129,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -67872,6 +257161,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -67903,6 +257193,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -67934,6 +257225,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -67965,6 +257257,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -67996,6 +257289,7 @@
       "uint8x16_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -68027,6 +257321,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -68058,6 +257353,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -68089,6 +257385,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -68120,6 +257417,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -68151,6 +257449,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -68182,6 +257481,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -68212,6 +257512,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -68241,6 +257542,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -68270,6 +257572,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -68299,6 +257602,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -68328,6 +257632,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -68357,6 +257662,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -68386,6 +257692,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -68410,6 +257717,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -68433,6 +257741,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -68456,6 +257765,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -68479,6 +257789,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -68502,6 +257813,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -68525,6 +257837,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -68548,6 +257861,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -68573,6 +257887,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -68596,6 +257911,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -68621,6 +257937,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -68644,6 +257961,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -68669,6 +257987,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -68692,6 +258011,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -68717,6 +258037,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -68741,6 +258062,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -68770,6 +258092,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -68797,6 +258120,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -68824,6 +258148,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -68853,6 +258178,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -68880,6 +258206,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -68906,6 +258233,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -68929,6 +258257,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -68952,6 +258281,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -68975,6 +258305,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69000,6 +258331,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69025,6 +258357,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69050,6 +258383,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69075,6 +258409,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69100,6 +258435,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69125,6 +258461,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69150,6 +258487,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69175,6 +258513,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69200,6 +258539,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -69225,6 +258565,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69248,6 +258589,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69271,6 +258613,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69294,6 +258637,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69317,6 +258661,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69340,6 +258685,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69363,6 +258709,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69386,6 +258733,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69409,6 +258757,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69432,6 +258781,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69455,6 +258805,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69478,6 +258829,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -69501,6 +258853,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69526,6 +258879,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69549,6 +258903,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69573,6 +258928,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69598,6 +258954,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69623,6 +258980,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69648,6 +259006,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69673,6 +259032,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69698,6 +259058,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69723,6 +259084,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69748,6 +259110,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69773,6 +259136,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -69798,6 +259162,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69822,6 +259187,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69845,6 +259211,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69869,6 +259236,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69893,6 +259261,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69917,6 +259286,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69941,6 +259311,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69965,6 +259336,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -69989,6 +259361,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -70013,6 +259386,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -70037,6 +259411,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -70061,6 +259436,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70086,6 +259462,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70109,6 +259486,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70134,6 +259512,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70158,6 +259537,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70183,6 +259563,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70208,6 +259589,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70233,6 +259615,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70258,6 +259641,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70283,6 +259667,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70308,6 +259693,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70333,6 +259719,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -70358,6 +259745,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70383,6 +259771,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70406,6 +259795,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70431,6 +259821,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70455,6 +259846,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70480,6 +259872,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70505,6 +259898,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70530,6 +259924,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70555,6 +259950,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70580,6 +259976,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70605,6 +260002,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70630,6 +260028,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -70655,6 +260054,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70680,6 +260080,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70703,6 +260104,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70728,6 +260130,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70752,6 +260155,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70777,6 +260181,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70802,6 +260207,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70827,6 +260233,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70852,6 +260259,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70877,6 +260285,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70902,6 +260311,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70927,6 +260337,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -70952,6 +260363,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -70977,6 +260389,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71000,6 +260413,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71025,6 +260439,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71049,6 +260464,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71074,6 +260490,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71099,6 +260516,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71124,6 +260542,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71149,6 +260568,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71174,6 +260594,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71199,6 +260620,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71224,6 +260646,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -71249,6 +260672,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71274,6 +260698,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71297,6 +260722,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71322,6 +260748,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71346,6 +260773,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71371,6 +260799,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71396,6 +260825,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71421,6 +260851,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71446,6 +260877,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71471,6 +260903,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71496,6 +260929,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71521,6 +260955,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -71546,6 +260981,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71571,6 +261007,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71594,6 +261031,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71619,6 +261057,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71643,6 +261082,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71668,6 +261108,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71693,6 +261134,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71718,6 +261160,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71743,6 +261186,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71768,6 +261212,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71793,6 +261238,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71818,6 +261264,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -71843,6 +261290,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71868,6 +261316,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71891,6 +261340,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71916,6 +261366,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71940,6 +261391,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71965,6 +261417,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -71990,6 +261443,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72015,6 +261469,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72040,6 +261495,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72065,6 +261521,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72090,6 +261547,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72115,6 +261573,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -72140,6 +261599,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72165,6 +261625,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72188,6 +261649,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72213,6 +261675,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72237,6 +261700,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72262,6 +261726,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72287,6 +261752,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72312,6 +261778,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72337,6 +261804,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72362,6 +261830,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72387,6 +261856,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72412,6 +261882,7 @@
       "uint8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -72437,6 +261908,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72462,6 +261934,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72485,6 +261958,7 @@
       "poly16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72510,6 +261984,7 @@
       "poly64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72534,6 +262009,7 @@
       "poly8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72559,6 +262035,7 @@
       "int16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72584,6 +262061,7 @@
       "int32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72609,6 +262087,7 @@
       "int64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72634,6 +262113,7 @@
       "int8x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72659,6 +262139,7 @@
       "uint16x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72684,6 +262165,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72709,6 +262191,7 @@
       "uint64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -72734,6 +262217,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72757,6 +262241,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72782,6 +262267,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72807,6 +262293,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72832,6 +262319,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72857,6 +262345,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72882,6 +262371,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72907,6 +262397,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72932,6 +262423,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72957,6 +262449,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -72982,6 +262475,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -73007,6 +262501,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73030,6 +262525,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73053,6 +262549,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73076,6 +262573,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73099,6 +262597,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73122,6 +262621,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73145,6 +262645,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73168,6 +262669,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73191,6 +262693,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73214,6 +262717,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73237,6 +262741,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73260,6 +262765,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73285,6 +262791,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -73308,6 +262815,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73332,6 +262840,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73355,6 +262864,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73379,6 +262889,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73403,6 +262914,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73427,6 +262939,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73451,6 +262964,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73475,6 +262989,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73499,6 +263014,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73523,6 +263039,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73547,6 +263064,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73571,6 +263089,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "128",
       "value": "poly128_t"
     },
     "Arguments_Preparation": {
@@ -73595,6 +263114,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73620,6 +263140,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73643,6 +263164,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73667,6 +263189,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73691,6 +263214,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73716,6 +263240,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73741,6 +263266,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73766,6 +263292,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73791,6 +263318,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73816,6 +263344,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73841,6 +263370,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73866,6 +263396,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73891,6 +263422,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -73916,6 +263448,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -73940,6 +263473,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -73963,6 +263497,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -73987,6 +263522,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74011,6 +263547,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74035,6 +263572,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74059,6 +263597,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74083,6 +263622,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74107,6 +263647,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74131,6 +263672,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74155,6 +263697,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74179,6 +263722,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -74203,6 +263747,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74228,6 +263773,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74251,6 +263797,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74275,6 +263822,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74300,6 +263848,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74324,6 +263873,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74349,6 +263899,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74374,6 +263925,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74399,6 +263951,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74424,6 +263977,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74449,6 +264003,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74474,6 +264029,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74499,6 +264055,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -74524,6 +264081,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74549,6 +264107,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74572,6 +264131,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74596,6 +264156,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74621,6 +264182,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74645,6 +264207,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74670,6 +264233,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74695,6 +264259,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74720,6 +264285,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74745,6 +264311,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74770,6 +264337,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74795,6 +264363,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74820,6 +264389,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -74845,6 +264415,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74870,6 +264441,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74893,6 +264465,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74917,6 +264490,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74942,6 +264516,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74966,6 +264541,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -74991,6 +264567,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75016,6 +264593,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75041,6 +264619,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75066,6 +264645,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75091,6 +264671,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75116,6 +264697,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75141,6 +264723,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -75166,6 +264749,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75191,6 +264775,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75214,6 +264799,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75238,6 +264824,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75263,6 +264850,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75287,6 +264875,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75312,6 +264901,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75337,6 +264927,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75362,6 +264953,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75387,6 +264979,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75412,6 +265005,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75437,6 +265031,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75462,6 +265057,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -75487,6 +265083,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75512,6 +265109,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75535,6 +265133,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75559,6 +265158,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75584,6 +265184,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75608,6 +265209,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75633,6 +265235,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75658,6 +265261,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75683,6 +265287,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75708,6 +265313,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75733,6 +265339,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75758,6 +265365,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75783,6 +265391,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -75808,6 +265417,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75833,6 +265443,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75856,6 +265467,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75880,6 +265492,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75905,6 +265518,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75929,6 +265543,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75954,6 +265569,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -75979,6 +265595,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76004,6 +265621,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76029,6 +265647,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76054,6 +265673,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76079,6 +265699,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76104,6 +265725,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -76129,6 +265751,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76154,6 +265777,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76177,6 +265801,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76201,6 +265826,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76226,6 +265852,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76250,6 +265877,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76275,6 +265903,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76300,6 +265929,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76325,6 +265955,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76350,6 +265981,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76375,6 +266007,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76400,6 +266033,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76425,6 +266059,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -76450,6 +266085,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76475,6 +266111,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76498,6 +266135,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76522,6 +266160,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76547,6 +266186,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76571,6 +266211,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76596,6 +266237,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76621,6 +266263,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76646,6 +266289,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76671,6 +266315,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76696,6 +266341,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76721,6 +266367,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76746,6 +266393,7 @@
       "uint8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -76771,6 +266419,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76796,6 +266445,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76819,6 +266469,7 @@
       "poly128_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76843,6 +266494,7 @@
       "poly16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76868,6 +266520,7 @@
       "poly64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76892,6 +266545,7 @@
       "poly8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76917,6 +266571,7 @@
       "int16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76942,6 +266597,7 @@
       "int32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76967,6 +266623,7 @@
       "int64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -76992,6 +266649,7 @@
       "int8x16_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77017,6 +266675,7 @@
       "uint16x8_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77042,6 +266701,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77067,6 +266727,7 @@
       "uint64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77092,6 +266753,7 @@
       "poly8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -77117,6 +266779,7 @@
       "int8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -77142,6 +266805,7 @@
       "uint8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -77167,6 +266831,7 @@
       "poly8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -77192,6 +266857,7 @@
       "int8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -77217,6 +266883,7 @@
       "uint8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77242,6 +266909,7 @@
       "poly16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -77267,6 +266935,7 @@
       "poly8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -77292,6 +266961,7 @@
       "int16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -77317,6 +266987,7 @@
       "int8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -77342,6 +267013,7 @@
       "uint16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -77367,6 +267039,7 @@
       "uint8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -77392,6 +267065,7 @@
       "poly16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -77417,6 +267091,7 @@
       "poly8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -77442,6 +267117,7 @@
       "int16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -77467,6 +267143,7 @@
       "int8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -77492,6 +267169,7 @@
       "uint16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -77517,6 +267195,7 @@
       "uint8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77542,6 +267221,7 @@
       "float32x2_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -77567,6 +267247,7 @@
       "poly16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -77592,6 +267273,7 @@
       "poly8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -77617,6 +267299,7 @@
       "int16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -77642,6 +267325,7 @@
       "int32x2_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -77667,6 +267351,7 @@
       "int8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -77692,6 +267377,7 @@
       "uint16x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -77717,6 +267403,7 @@
       "uint32x2_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -77742,6 +267429,7 @@
       "uint8x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -77767,6 +267455,7 @@
       "float32x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -77792,6 +267481,7 @@
       "poly16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -77817,6 +267507,7 @@
       "poly8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -77842,6 +267533,7 @@
       "int16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -77867,6 +267559,7 @@
       "int32x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -77892,6 +267585,7 @@
       "int8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -77917,6 +267611,7 @@
       "uint16x8_t vec"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -77942,6 +267637,7 @@
       "uint32x4_t vec"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -77967,6 +267663,7 @@
       "uint8x16_t vec"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -77993,6 +267690,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -78022,6 +267720,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -78051,6 +267750,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -78080,6 +267780,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -78109,6 +267810,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -78138,6 +267840,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -78167,6 +267870,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -78196,6 +267900,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -78225,6 +267930,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -78254,6 +267960,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -78283,6 +267990,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -78312,6 +268020,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -78340,6 +268049,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78363,6 +268073,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78386,6 +268097,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78409,6 +268121,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78432,6 +268145,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78455,6 +268169,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78478,6 +268193,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78501,6 +268217,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78524,6 +268241,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78547,6 +268265,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78570,6 +268289,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78593,6 +268313,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78616,6 +268337,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78639,6 +268361,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78662,6 +268385,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78685,6 +268409,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78708,6 +268433,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78732,6 +268458,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78755,6 +268482,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78779,6 +268507,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78802,6 +268531,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78826,6 +268556,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78849,6 +268580,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78873,6 +268605,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78896,6 +268629,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -78920,6 +268654,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -78943,6 +268678,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -78967,6 +268703,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -78990,6 +268727,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -79014,6 +268752,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -79037,6 +268776,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -79061,6 +268801,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -79085,6 +268826,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -79109,6 +268851,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -79133,6 +268876,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -79157,6 +268901,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -79181,6 +268926,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -79204,6 +268950,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -79228,6 +268975,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -79251,6 +268999,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -79275,6 +269024,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -79298,6 +269048,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -79322,6 +269073,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -79345,6 +269097,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -79369,6 +269122,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -79393,6 +269147,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -79422,6 +269177,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -79451,6 +269207,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -79480,6 +269237,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -79509,6 +269267,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -79538,6 +269297,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -79567,6 +269327,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -79596,6 +269357,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -79625,6 +269387,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -79652,6 +269415,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -79679,6 +269443,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -79708,6 +269473,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -79737,6 +269503,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -79766,6 +269533,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -79795,6 +269563,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -79824,6 +269593,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -79853,6 +269623,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -79882,6 +269653,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -79911,6 +269683,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -79941,6 +269714,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -79971,6 +269745,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -80001,6 +269776,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -80031,6 +269807,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -80061,6 +269838,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -80091,6 +269869,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -80121,6 +269900,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -80151,6 +269931,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -80179,6 +269960,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -80208,6 +269990,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -80240,6 +270023,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -80272,6 +270056,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -80304,6 +270089,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -80336,6 +270122,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -80368,6 +270155,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -80399,6 +270187,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -80429,6 +270218,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -80459,6 +270249,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -80489,6 +270280,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -80519,6 +270311,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -80549,6 +270342,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -80579,6 +270373,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -80609,6 +270404,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -80639,6 +270435,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -80669,6 +270466,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -80699,6 +270497,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -80729,6 +270528,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -80759,6 +270559,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -80789,6 +270590,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -80818,6 +270620,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -80843,6 +270646,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -80866,6 +270670,7 @@
       "uint32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -80891,6 +270696,7 @@
       "float64_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -80914,6 +270720,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -80939,6 +270746,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -80962,6 +270770,7 @@
       "uint32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -80987,6 +270796,7 @@
       "float32_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -81011,6 +270821,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -81040,6 +270851,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -81067,6 +270879,7 @@
       "float64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64_t"
     },
     "Arguments_Preparation": {
@@ -81094,6 +270907,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -81123,6 +270937,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -81150,6 +270965,7 @@
       "float32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32_t"
     },
     "Arguments_Preparation": {
@@ -81178,6 +270994,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -81212,6 +271029,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -81246,6 +271064,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -81280,6 +271099,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -81314,6 +271134,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -81348,6 +271169,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -81382,6 +271204,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -81416,6 +271239,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -81450,6 +271274,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -81482,6 +271307,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -81514,6 +271340,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -81548,6 +271375,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -81582,6 +271410,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -81616,6 +271445,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -81650,6 +271480,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -81684,6 +271515,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -81718,6 +271550,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -81752,6 +271585,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -81786,6 +271620,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -81817,6 +271652,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -81848,6 +271684,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -81879,6 +271716,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -81910,6 +271748,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -81941,6 +271780,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -81971,6 +271811,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -82000,6 +271841,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -82029,6 +271871,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -82058,6 +271901,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -82087,6 +271931,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -82116,6 +271961,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -82146,6 +271992,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -82180,6 +272027,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -82212,6 +272060,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -82246,6 +272095,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -82279,6 +272129,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -82313,6 +272164,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -82347,6 +272199,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -82381,6 +272234,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -82415,6 +272269,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -82449,6 +272304,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -82483,6 +272339,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -82517,6 +272374,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -82551,6 +272409,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -82585,6 +272444,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -82619,6 +272479,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -82651,6 +272512,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -82685,6 +272547,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -82718,6 +272581,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -82752,6 +272616,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -82786,6 +272651,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -82820,6 +272686,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -82854,6 +272721,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -82888,6 +272756,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -82922,6 +272791,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -82956,6 +272826,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -82990,6 +272861,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -83024,6 +272896,7 @@
       "uint32x4_t wk"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83054,6 +272927,7 @@
       "uint32_t hash_e"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -83080,6 +272954,7 @@
       "uint32x4_t wk"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83112,6 +272987,7 @@
       "uint32x4_t wk"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83144,6 +273020,7 @@
       "uint32x4_t w8_11"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83175,6 +273052,7 @@
       "uint32x4_t w12_15"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83204,6 +273082,7 @@
       "uint32x4_t wk"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83236,6 +273115,7 @@
       "uint32x4_t wk"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83267,6 +273147,7 @@
       "uint32x4_t w4_7"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83296,6 +273177,7 @@
       "uint32x4_t w12_15"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -83328,6 +273210,7 @@
       "uint64x2_t hash_ab"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -83357,6 +273240,7 @@
       "uint64x2_t kwh_kwh2"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -83385,6 +273269,7 @@
       "uint64x2_t w2_"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -83413,6 +273298,7 @@
       "uint64x2_t w9_10"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -83441,6 +273327,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -83471,6 +273358,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -83501,6 +273389,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -83531,6 +273420,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -83561,6 +273451,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -83591,6 +273482,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -83621,6 +273513,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -83651,6 +273544,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -83681,6 +273575,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -83710,6 +273605,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -83739,6 +273635,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -83768,6 +273665,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -83797,6 +273695,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -83826,6 +273725,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -83855,6 +273755,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -83884,6 +273785,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -83913,6 +273815,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -83941,6 +273844,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -83969,6 +273873,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -83996,6 +273901,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -84023,6 +273929,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -84031,7 +273938,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 15
+        "maximum": 16
       }
     },
     "Architectures": [
@@ -84051,6 +273958,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -84059,7 +273967,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 31
+        "maximum": 32
       }
     },
     "Architectures": [
@@ -84079,6 +273987,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -84087,7 +273996,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 7
+        "maximum": 8
       }
     },
     "Architectures": [
@@ -84107,6 +274016,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -84115,7 +274025,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 15
+        "maximum": 16
       }
     },
     "Architectures": [
@@ -84135,6 +274045,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -84143,7 +274054,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 31
+        "maximum": 32
       }
     },
     "Architectures": [
@@ -84163,6 +274074,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -84171,7 +274083,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 7
+        "maximum": 8
       }
     },
     "Architectures": [
@@ -84191,6 +274103,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -84199,7 +274112,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 15
+        "maximum": 16
       }
     },
     "Architectures": [
@@ -84221,6 +274134,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -84229,7 +274143,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 31
+        "maximum": 32
       }
     },
     "Architectures": [
@@ -84251,6 +274165,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -84259,7 +274174,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 7
+        "maximum": 8
       }
     },
     "Architectures": [
@@ -84281,6 +274196,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -84289,7 +274205,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 15
+        "maximum": 16
       }
     },
     "Architectures": [
@@ -84311,6 +274227,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -84319,7 +274236,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 31
+        "maximum": 32
       }
     },
     "Architectures": [
@@ -84341,6 +274258,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -84349,7 +274267,7 @@
       },
       "n": {
         "minimum": 0,
-        "maximum": 7
+        "maximum": 8
       }
     },
     "Architectures": [
@@ -84371,6 +274289,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -84401,6 +274320,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -84431,6 +274351,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -84461,6 +274382,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -84491,6 +274413,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -84521,6 +274444,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -84551,6 +274475,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -84581,6 +274506,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -84611,6 +274537,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -84640,6 +274567,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -84669,6 +274597,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -84698,6 +274627,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -84727,6 +274657,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -84756,6 +274687,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -84785,6 +274717,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -84814,6 +274747,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -84843,6 +274777,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -84873,6 +274808,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -84903,6 +274839,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -84933,6 +274870,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -84963,6 +274901,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -84993,6 +274932,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -85023,6 +274963,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -85053,6 +274994,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -85083,6 +275025,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -85111,6 +275054,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -85140,6 +275084,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -85172,6 +275117,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -85204,6 +275150,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -85236,6 +275183,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -85268,6 +275216,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -85300,6 +275249,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -85331,6 +275281,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -85361,6 +275312,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -85391,6 +275343,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -85421,6 +275374,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -85451,6 +275405,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -85481,6 +275436,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -85511,6 +275467,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -85541,6 +275498,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -85571,6 +275529,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -85601,6 +275560,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -85631,6 +275591,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -85661,6 +275622,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -85691,6 +275653,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -85721,6 +275684,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -85752,6 +275716,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -85786,6 +275751,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -85819,6 +275785,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -85853,6 +275820,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -85887,6 +275855,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -85921,6 +275890,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -85955,6 +275925,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -85989,6 +275960,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -86023,6 +275995,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -86057,6 +276030,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -86091,6 +276065,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -86125,6 +276100,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -86157,6 +276133,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -86189,6 +276166,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -86223,6 +276201,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -86256,6 +276235,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -86290,6 +276270,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -86324,6 +276305,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -86358,6 +276340,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -86392,6 +276375,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -86426,6 +276410,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -86460,6 +276445,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86494,6 +276480,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -86528,6 +276515,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -86562,6 +276550,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86589,6 +276578,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86616,6 +276606,7 @@
       "uint32x4_t c"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86644,6 +276635,7 @@
       "const int imm2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86676,6 +276668,7 @@
       "const int imm2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86708,6 +276701,7 @@
       "const int imm2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86740,6 +276734,7 @@
       "const int imm2"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86770,6 +276765,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86795,6 +276791,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -86820,6 +276817,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -86847,6 +276845,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -86874,6 +276873,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -86901,6 +276901,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -86928,6 +276929,7 @@
       "int8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8_t"
     },
     "Arguments_Preparation": {
@@ -86955,6 +276957,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -86982,6 +276985,7 @@
       "int16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16_t"
     },
     "Arguments_Preparation": {
@@ -87009,6 +277013,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -87036,6 +277041,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -87063,6 +277069,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -87090,6 +277097,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -87117,6 +277125,7 @@
       "int32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32_t"
     },
     "Arguments_Preparation": {
@@ -87143,6 +277152,7 @@
       "float32x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -87166,6 +277176,7 @@
       "float64x1_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -87189,6 +277200,7 @@
       "float32x4_t a"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -87212,6 +277224,7 @@
       "float64x2_t a"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -87237,6 +277250,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -87271,6 +277285,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -87305,6 +277320,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -87339,6 +277355,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -87373,6 +277390,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -87407,6 +277425,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -87441,6 +277460,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -87475,6 +277495,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -87509,6 +277530,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -87541,6 +277563,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -87573,6 +277596,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -87607,6 +277631,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -87641,6 +277666,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -87675,6 +277701,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -87709,6 +277736,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -87743,6 +277771,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -87777,6 +277806,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -87811,6 +277841,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -87845,6 +277876,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -87879,6 +277911,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x1_t"
     },
     "Arguments_Preparation": {
@@ -87912,6 +277945,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -87946,6 +277980,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -87980,6 +278015,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -88014,6 +278050,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -88048,6 +278085,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -88082,6 +278120,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -88116,6 +278155,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -88150,6 +278190,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -88184,6 +278225,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -88218,6 +278260,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -88250,6 +278293,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -88282,6 +278326,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -88316,6 +278361,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -88349,6 +278395,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -88383,6 +278430,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -88417,6 +278465,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -88451,6 +278500,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -88485,6 +278535,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -88519,6 +278570,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -88553,6 +278605,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -88587,6 +278640,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -88621,6 +278675,7 @@
       "const int n"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -88654,6 +278709,7 @@
       "float32x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88683,6 +278739,7 @@
       "float32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88712,6 +278769,7 @@
       "float32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88741,6 +278799,7 @@
       "float32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88770,6 +278829,7 @@
       "float64x1_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88797,6 +278857,7 @@
       "float64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88824,6 +278885,7 @@
       "float64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88851,6 +278913,7 @@
       "float64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88879,6 +278942,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88913,6 +278977,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88945,6 +279010,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -88979,6 +279045,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89012,6 +279079,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89046,6 +279114,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89080,6 +279149,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89114,6 +279184,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89148,6 +279219,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89182,6 +279254,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89216,6 +279289,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89250,6 +279324,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89284,6 +279359,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89317,6 +279393,7 @@
       "poly16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89346,6 +279423,7 @@
       "poly16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89375,6 +279453,7 @@
       "poly16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89404,6 +279483,7 @@
       "poly16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89433,6 +279513,7 @@
       "poly64x1_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89461,6 +279542,7 @@
       "poly64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89489,6 +279571,7 @@
       "poly64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89517,6 +279600,7 @@
       "poly64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89545,6 +279629,7 @@
       "poly8x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89574,6 +279659,7 @@
       "poly8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89603,6 +279689,7 @@
       "poly8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89632,6 +279719,7 @@
       "poly8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89661,6 +279749,7 @@
       "int16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89690,6 +279779,7 @@
       "int16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89719,6 +279809,7 @@
       "int16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89748,6 +279839,7 @@
       "int16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89777,6 +279869,7 @@
       "int32x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89806,6 +279899,7 @@
       "int32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89835,6 +279929,7 @@
       "int32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89864,6 +279959,7 @@
       "int32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89893,6 +279989,7 @@
       "int64x1_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89922,6 +280019,7 @@
       "int64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89951,6 +280049,7 @@
       "int64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -89980,6 +280079,7 @@
       "int64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90009,6 +280109,7 @@
       "int8x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90038,6 +280139,7 @@
       "int8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90067,6 +280169,7 @@
       "int8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90096,6 +280199,7 @@
       "int8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90125,6 +280229,7 @@
       "uint16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90154,6 +280259,7 @@
       "uint16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90183,6 +280289,7 @@
       "uint16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90212,6 +280319,7 @@
       "uint16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90241,6 +280349,7 @@
       "uint32x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90270,6 +280379,7 @@
       "uint32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90299,6 +280409,7 @@
       "uint32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90328,6 +280439,7 @@
       "uint32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90357,6 +280469,7 @@
       "uint64x1_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90386,6 +280499,7 @@
       "uint64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90415,6 +280529,7 @@
       "uint64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90444,6 +280559,7 @@
       "uint64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90473,6 +280589,7 @@
       "uint8x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90502,6 +280619,7 @@
       "uint8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90531,6 +280649,7 @@
       "uint8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90560,6 +280679,7 @@
       "uint8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90589,6 +280709,7 @@
       "float32x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90618,6 +280739,7 @@
       "float32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90647,6 +280769,7 @@
       "float32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90676,6 +280799,7 @@
       "float32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90705,6 +280829,7 @@
       "float64x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90732,6 +280857,7 @@
       "float64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90759,6 +280885,7 @@
       "float64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90786,6 +280913,7 @@
       "float64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90814,6 +280942,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90848,6 +280977,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90880,6 +281010,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90914,6 +281045,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90947,6 +281079,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -90981,6 +281114,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91015,6 +281149,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91049,6 +281184,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91083,6 +281219,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91117,6 +281254,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91151,6 +281289,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91185,6 +281324,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91219,6 +281359,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91252,6 +281393,7 @@
       "poly16x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91281,6 +281423,7 @@
       "poly16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91310,6 +281453,7 @@
       "poly16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91339,6 +281483,7 @@
       "poly16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91368,6 +281513,7 @@
       "poly64x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91396,6 +281542,7 @@
       "poly64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91424,6 +281571,7 @@
       "poly64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91453,6 +281601,7 @@
       "poly64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91481,6 +281630,7 @@
       "poly8x16_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91510,6 +281660,7 @@
       "poly8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91539,6 +281690,7 @@
       "poly8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91568,6 +281720,7 @@
       "poly8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91597,6 +281750,7 @@
       "int16x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91626,6 +281780,7 @@
       "int16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91655,6 +281810,7 @@
       "int16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91684,6 +281840,7 @@
       "int16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91713,6 +281870,7 @@
       "int32x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91742,6 +281900,7 @@
       "int32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91771,6 +281930,7 @@
       "int32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91800,6 +281960,7 @@
       "int32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91829,6 +281990,7 @@
       "int64x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91858,6 +282020,7 @@
       "int64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91887,6 +282050,7 @@
       "int64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91916,6 +282080,7 @@
       "int64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91945,6 +282110,7 @@
       "int8x16_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -91974,6 +282140,7 @@
       "int8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92003,6 +282170,7 @@
       "int8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92032,6 +282200,7 @@
       "int8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92061,6 +282230,7 @@
       "uint16x8_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92090,6 +282260,7 @@
       "uint16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92119,6 +282290,7 @@
       "uint16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92148,6 +282320,7 @@
       "uint16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92177,6 +282350,7 @@
       "uint32x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92206,6 +282380,7 @@
       "uint32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92235,6 +282410,7 @@
       "uint32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92264,6 +282440,7 @@
       "uint32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92293,6 +282470,7 @@
       "uint64x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92322,6 +282500,7 @@
       "uint64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92351,6 +282530,7 @@
       "uint64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92380,6 +282560,7 @@
       "uint64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92409,6 +282590,7 @@
       "uint8x16_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92438,6 +282620,7 @@
       "uint8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92467,6 +282650,7 @@
       "uint8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92496,6 +282680,7 @@
       "uint8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92525,6 +282710,7 @@
       "float32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92554,6 +282740,7 @@
       "float64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92582,6 +282769,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92616,6 +282804,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92648,6 +282837,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92682,6 +282872,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92714,6 +282905,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92748,6 +282940,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92782,6 +282975,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92816,6 +283010,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92848,6 +283043,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92882,6 +283078,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92916,6 +283113,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92950,6 +283148,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -92982,6 +283181,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93015,6 +283215,7 @@
       "poly16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93044,6 +283245,7 @@
       "poly64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93072,6 +283274,7 @@
       "poly8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93101,6 +283304,7 @@
       "int16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93130,6 +283334,7 @@
       "int32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93159,6 +283364,7 @@
       "int64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93188,6 +283394,7 @@
       "int8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93217,6 +283424,7 @@
       "uint16x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93246,6 +283454,7 @@
       "uint32x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93275,6 +283484,7 @@
       "uint64x1x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93304,6 +283514,7 @@
       "uint8x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93333,6 +283544,7 @@
       "float32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93362,6 +283574,7 @@
       "float64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93390,6 +283603,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93424,6 +283638,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93456,6 +283671,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93490,6 +283706,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93522,6 +283739,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93554,6 +283772,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93588,6 +283807,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93622,6 +283842,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93654,6 +283875,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93686,6 +283908,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93720,6 +283943,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93754,6 +283978,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93786,6 +284011,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93817,6 +284043,7 @@
       "poly16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93846,6 +284073,7 @@
       "poly64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93873,6 +284101,7 @@
       "poly8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93902,6 +284131,7 @@
       "int16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93931,6 +284161,7 @@
       "int32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93960,6 +284191,7 @@
       "int64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -93987,6 +284219,7 @@
       "int8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94016,6 +284249,7 @@
       "uint16x8x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94045,6 +284279,7 @@
       "uint32x4x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94074,6 +284309,7 @@
       "uint64x2x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94101,6 +284337,7 @@
       "uint8x16x2_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94130,6 +284367,7 @@
       "float32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94159,6 +284397,7 @@
       "float64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94187,6 +284426,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94221,6 +284461,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94253,6 +284494,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94287,6 +284529,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94319,6 +284562,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94353,6 +284597,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94387,6 +284632,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94421,6 +284667,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94453,6 +284700,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94487,6 +284735,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94521,6 +284770,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94555,6 +284805,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94587,6 +284838,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94620,6 +284872,7 @@
       "poly16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94649,6 +284902,7 @@
       "poly64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94677,6 +284931,7 @@
       "poly8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94706,6 +284961,7 @@
       "int16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94735,6 +284991,7 @@
       "int32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94764,6 +285021,7 @@
       "int64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94793,6 +285051,7 @@
       "int8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94822,6 +285081,7 @@
       "uint16x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94851,6 +285111,7 @@
       "uint32x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94880,6 +285141,7 @@
       "uint64x1x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94909,6 +285171,7 @@
       "uint8x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94938,6 +285201,7 @@
       "float32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94967,6 +285231,7 @@
       "float64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -94995,6 +285260,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95029,6 +285295,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95061,6 +285328,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95095,6 +285363,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95127,6 +285396,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95161,6 +285431,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95195,6 +285466,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95229,6 +285501,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95261,6 +285534,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95295,6 +285569,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95329,6 +285604,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95363,6 +285639,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95395,6 +285672,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95428,6 +285706,7 @@
       "poly16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95457,6 +285736,7 @@
       "poly64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95484,6 +285764,7 @@
       "poly8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95513,6 +285794,7 @@
       "int16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95542,6 +285824,7 @@
       "int32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95571,6 +285854,7 @@
       "int64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95598,6 +285882,7 @@
       "int8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95627,6 +285912,7 @@
       "uint16x8x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95656,6 +285942,7 @@
       "uint32x4x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95685,6 +285972,7 @@
       "uint64x2x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95712,6 +286000,7 @@
       "uint8x16x3_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95741,6 +286030,7 @@
       "float32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95770,6 +286060,7 @@
       "float64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95798,6 +286089,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95832,6 +286124,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95864,6 +286157,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95898,6 +286192,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95930,6 +286225,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95964,6 +286260,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -95998,6 +286295,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96032,6 +286330,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96064,6 +286363,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96098,6 +286398,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96132,6 +286433,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96166,6 +286468,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96198,6 +286501,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96231,6 +286535,7 @@
       "poly16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96260,6 +286565,7 @@
       "poly64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96288,6 +286594,7 @@
       "poly8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96317,6 +286624,7 @@
       "int16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96346,6 +286654,7 @@
       "int32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96375,6 +286684,7 @@
       "int64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96404,6 +286714,7 @@
       "int8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96433,6 +286744,7 @@
       "uint16x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96462,6 +286774,7 @@
       "uint32x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96491,6 +286804,7 @@
       "uint64x1x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96520,6 +286834,7 @@
       "uint8x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96549,6 +286864,7 @@
       "float32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96578,6 +286894,7 @@
       "float64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96606,6 +286923,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96640,6 +286958,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96672,6 +286991,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96706,6 +287026,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96738,6 +287059,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96770,6 +287092,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96804,6 +287127,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96838,6 +287162,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96870,6 +287195,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96902,6 +287228,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96936,6 +287263,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -96970,6 +287298,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97002,6 +287331,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97033,6 +287363,7 @@
       "poly16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97062,6 +287393,7 @@
       "poly64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97089,6 +287421,7 @@
       "poly8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97118,6 +287451,7 @@
       "int16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97147,6 +287481,7 @@
       "int32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97176,6 +287511,7 @@
       "int64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97203,6 +287539,7 @@
       "int8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97232,6 +287569,7 @@
       "uint16x8x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97261,6 +287599,7 @@
       "uint32x4x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97290,6 +287629,7 @@
       "uint64x2x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97317,6 +287657,7 @@
       "uint8x16x4_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97346,6 +287687,7 @@
       "poly128_t val"
     ],
     "return_type": {
+      "element_bit_size": "0",
       "value": "void"
     },
     "Arguments_Preparation": {
@@ -97374,6 +287716,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -97403,6 +287746,7 @@
       "float64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x1_t"
     },
     "Arguments_Preparation": {
@@ -97430,6 +287774,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -97459,6 +287804,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -97488,6 +287834,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -97517,6 +287864,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -97546,6 +287894,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -97575,6 +287924,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -97604,6 +287954,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -97633,6 +287984,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -97662,6 +288014,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -97689,6 +288042,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -97717,6 +288071,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -97748,6 +288103,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -97779,6 +288135,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -97810,6 +288167,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -97841,6 +288199,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -97872,6 +288231,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -97902,6 +288262,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -97931,6 +288292,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -97960,6 +288322,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -97989,6 +288352,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -98018,6 +288382,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -98047,6 +288412,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -98076,6 +288442,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -98103,6 +288470,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -98130,6 +288498,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -98157,6 +288526,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -98184,6 +288554,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -98211,6 +288582,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -98238,6 +288610,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -98267,6 +288640,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -98296,6 +288670,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -98325,6 +288700,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -98354,6 +288730,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -98383,6 +288760,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -98412,6 +288790,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -98441,6 +288820,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -98468,6 +288848,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -98497,6 +288878,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -98526,6 +288908,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -98555,6 +288938,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -98584,6 +288968,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -98613,6 +288998,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -98642,6 +289028,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -98671,6 +289058,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -98700,6 +289088,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -98727,6 +289116,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -98754,6 +289144,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -98781,6 +289172,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -98808,6 +289200,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -98835,6 +289228,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -98862,6 +289256,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -98891,6 +289286,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -98920,6 +289316,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -98949,6 +289346,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -98978,6 +289376,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -99007,6 +289406,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -99038,6 +289438,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -99075,6 +289476,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -99111,6 +289513,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -99148,6 +289551,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -99182,6 +289586,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99209,6 +289614,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99236,6 +289642,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99263,6 +289670,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99290,6 +289698,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99317,6 +289726,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99344,6 +289754,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99371,6 +289782,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99398,6 +289810,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99425,6 +289838,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99452,6 +289866,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99479,6 +289894,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99507,6 +289923,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99539,6 +289956,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99571,6 +289989,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99603,6 +290022,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99632,6 +290052,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99661,6 +290082,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99690,6 +290112,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99722,6 +290145,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99754,6 +290178,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99786,6 +290211,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99815,6 +290241,7 @@
       "int8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -99844,6 +290271,7 @@
       "uint8x8_t idx"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -99872,6 +290300,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -99899,6 +290328,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -99926,6 +290356,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -99953,6 +290384,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -99980,6 +290412,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -100007,6 +290440,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -100034,6 +290468,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -100061,6 +290496,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -100088,6 +290524,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -100115,6 +290552,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -100142,6 +290580,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -100169,6 +290608,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -100196,6 +290636,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -100223,6 +290664,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -100250,6 +290692,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -100277,6 +290720,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -100304,6 +290748,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -100331,6 +290776,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -100358,6 +290804,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -100385,6 +290832,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -100412,6 +290860,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -100439,6 +290888,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -100466,6 +290916,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -100493,6 +290944,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -100520,6 +290972,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -100547,6 +291000,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -100574,6 +291028,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -100601,6 +291056,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -100628,6 +291084,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -100655,6 +291112,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -100682,6 +291140,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -100709,6 +291168,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -100736,6 +291196,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -100763,6 +291224,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -100790,6 +291252,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -100817,6 +291280,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -100844,6 +291308,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -100871,6 +291336,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -100898,6 +291364,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -100925,6 +291392,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -100952,6 +291420,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -100979,6 +291448,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -101006,6 +291476,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -101033,6 +291504,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -101060,6 +291532,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -101090,6 +291563,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101120,6 +291594,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101150,6 +291625,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101180,6 +291656,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -101210,6 +291687,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101240,6 +291718,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101270,6 +291749,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -101300,6 +291780,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101330,6 +291811,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101360,6 +291842,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101390,6 +291873,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -101420,6 +291904,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101450,6 +291935,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101480,6 +291966,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -101510,6 +291997,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -101540,6 +292028,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -101570,6 +292059,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -101600,6 +292090,7 @@
       "poly64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -101628,6 +292119,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -101657,6 +292149,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -101686,6 +292179,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -101715,6 +292209,7 @@
       "int64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -101742,6 +292237,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -101771,6 +292267,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -101800,6 +292297,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -101829,6 +292327,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x1_t"
     },
     "Arguments_Preparation": {
@@ -101856,6 +292355,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -101885,6 +292385,7 @@
       "int64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -101912,6 +292413,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64_t"
     },
     "Arguments_Preparation": {
@@ -101939,6 +292441,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -101967,6 +292470,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -101996,6 +292500,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -102025,6 +292530,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -102054,6 +292560,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -102081,6 +292588,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -102110,6 +292618,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -102139,6 +292648,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -102168,6 +292678,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -102195,6 +292706,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -102224,6 +292736,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -102251,6 +292764,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -102278,6 +292792,7 @@
       "uint64x1_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x1_t"
     },
     "Arguments_Preparation": {
@@ -102305,6 +292820,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -102332,6 +292848,7 @@
       "uint8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8_t"
     },
     "Arguments_Preparation": {
@@ -102359,6 +292876,7 @@
       "uint64_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64_t"
     },
     "Arguments_Preparation": {
@@ -102386,6 +292904,7 @@
       "uint16_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16_t"
     },
     "Arguments_Preparation": {
@@ -102413,6 +292932,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -102440,6 +292960,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -102467,6 +292988,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -102494,6 +293016,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -102521,6 +293044,7 @@
       "uint32_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32_t"
     },
     "Arguments_Preparation": {
@@ -102550,6 +293074,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -102587,6 +293112,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -102622,6 +293148,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -102655,6 +293182,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -102692,6 +293220,7 @@
       "const int lane"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -102727,6 +293256,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -102758,6 +293288,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -102789,6 +293320,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -102816,6 +293348,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -102843,6 +293376,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -102870,6 +293404,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -102897,6 +293432,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -102924,6 +293460,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -102951,6 +293488,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -102978,6 +293516,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -103005,6 +293544,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -103032,6 +293572,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -103059,6 +293600,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -103086,6 +293628,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -103113,6 +293656,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -103140,6 +293684,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -103167,6 +293712,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -103194,6 +293740,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -103221,6 +293768,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -103248,6 +293796,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -103275,6 +293824,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -103302,6 +293852,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -103329,6 +293880,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -103356,6 +293908,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -103383,6 +293936,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -103410,6 +293964,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -103437,6 +293992,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -103464,6 +294020,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -103491,6 +294048,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -103518,6 +294076,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -103545,6 +294104,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -103572,6 +294132,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -103599,6 +294160,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -103626,6 +294188,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -103653,6 +294216,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -103680,6 +294244,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -103707,6 +294272,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -103734,6 +294300,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -103761,6 +294328,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -103788,6 +294356,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -103815,6 +294384,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -103842,6 +294412,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -103869,6 +294440,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -103896,6 +294468,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -103923,6 +294496,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -103950,6 +294524,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -103977,6 +294552,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -104007,6 +294583,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104037,6 +294614,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104067,6 +294645,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104097,6 +294676,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -104127,6 +294707,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104157,6 +294738,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104187,6 +294769,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -104217,6 +294800,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104247,6 +294831,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104277,6 +294862,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104307,6 +294893,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -104337,6 +294924,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104367,6 +294955,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104397,6 +294986,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -104427,6 +295017,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -104457,6 +295048,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -104487,6 +295079,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -104518,6 +295111,7 @@
       "const int imm6"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -104547,6 +295141,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -104574,6 +295169,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -104601,6 +295197,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -104628,6 +295225,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -104655,6 +295253,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -104682,6 +295281,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -104709,6 +295309,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -104736,6 +295337,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -104763,6 +295365,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -104790,6 +295393,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -104817,6 +295421,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -104844,6 +295449,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -104871,6 +295477,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -104898,6 +295505,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -104925,6 +295533,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -104952,6 +295561,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -104979,6 +295589,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -105006,6 +295617,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -105033,6 +295645,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -105060,6 +295673,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -105087,6 +295701,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -105114,6 +295729,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -105141,6 +295757,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2_t"
     },
     "Arguments_Preparation": {
@@ -105168,6 +295785,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4_t"
     },
     "Arguments_Preparation": {
@@ -105195,6 +295813,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8_t"
     },
     "Arguments_Preparation": {
@@ -105222,6 +295841,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4_t"
     },
     "Arguments_Preparation": {
@@ -105249,6 +295869,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2_t"
     },
     "Arguments_Preparation": {
@@ -105276,6 +295897,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8_t"
     },
     "Arguments_Preparation": {
@@ -105303,6 +295925,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4_t"
     },
     "Arguments_Preparation": {
@@ -105330,6 +295953,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2_t"
     },
     "Arguments_Preparation": {
@@ -105357,6 +295981,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8_t"
     },
     "Arguments_Preparation": {
@@ -105384,6 +296009,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4_t"
     },
     "Arguments_Preparation": {
@@ -105411,6 +296037,7 @@
       "float64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "float64x2_t"
     },
     "Arguments_Preparation": {
@@ -105438,6 +296065,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8_t"
     },
     "Arguments_Preparation": {
@@ -105465,6 +296093,7 @@
       "poly64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "poly64x2_t"
     },
     "Arguments_Preparation": {
@@ -105492,6 +296121,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16_t"
     },
     "Arguments_Preparation": {
@@ -105519,6 +296149,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8_t"
     },
     "Arguments_Preparation": {
@@ -105546,6 +296177,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4_t"
     },
     "Arguments_Preparation": {
@@ -105573,6 +296205,7 @@
       "int64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "int64x2_t"
     },
     "Arguments_Preparation": {
@@ -105600,6 +296233,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16_t"
     },
     "Arguments_Preparation": {
@@ -105627,6 +296261,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8_t"
     },
     "Arguments_Preparation": {
@@ -105654,6 +296289,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4_t"
     },
     "Arguments_Preparation": {
@@ -105681,6 +296317,7 @@
       "uint64x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "64",
       "value": "uint64x2_t"
     },
     "Arguments_Preparation": {
@@ -105708,6 +296345,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16_t"
     },
     "Arguments_Preparation": {
@@ -105735,6 +296373,7 @@
       "float32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -105765,6 +296404,7 @@
       "poly16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -105795,6 +296435,7 @@
       "poly8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -105825,6 +296466,7 @@
       "int16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -105855,6 +296497,7 @@
       "int32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -105885,6 +296528,7 @@
       "int8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -105915,6 +296559,7 @@
       "uint16x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x4x2_t"
     },
     "Arguments_Preparation": {
@@ -105945,6 +296590,7 @@
       "uint32x2_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x2x2_t"
     },
     "Arguments_Preparation": {
@@ -105975,6 +296621,7 @@
       "uint8x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x8x2_t"
     },
     "Arguments_Preparation": {
@@ -106005,6 +296652,7 @@
       "float32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "float32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -106035,6 +296683,7 @@
       "poly16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "poly16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -106065,6 +296714,7 @@
       "poly8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "poly8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -106095,6 +296745,7 @@
       "int16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "int16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -106125,6 +296776,7 @@
       "int32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "int32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -106155,6 +296807,7 @@
       "int8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "int8x16x2_t"
     },
     "Arguments_Preparation": {
@@ -106185,6 +296838,7 @@
       "uint16x8_t b"
     ],
     "return_type": {
+      "element_bit_size": "16",
       "value": "uint16x8x2_t"
     },
     "Arguments_Preparation": {
@@ -106215,6 +296869,7 @@
       "uint32x4_t b"
     ],
     "return_type": {
+      "element_bit_size": "32",
       "value": "uint32x4x2_t"
     },
     "Arguments_Preparation": {
@@ -106245,6 +296900,7 @@
       "uint8x16_t b"
     ],
     "return_type": {
+      "element_bit_size": "8",
       "value": "uint8x16x2_t"
     },
     "Arguments_Preparation": {
diff --git a/vendor.yml b/vendor.yml
index ac5b408ac4..fd2bfecba7 100644
--- a/vendor.yml
+++ b/vendor.yml
@@ -1,2 +1,3 @@
-- crates/stdarch-verify/arm-intrinsics.html
 - crates/stdarch-verify/x86-intel.xml
+- crates/stdarch-verify/mips-msa.h
+- intrinsics_data/arm_intrinsics.json

From 34b676f83cd49fa4a5bcb63e5d93999793f49b45 Mon Sep 17 00:00:00 2001
From: Luca Vizzarro <luca.vizzarro@arm.com>
Date: Tue, 5 Dec 2023 18:25:35 +0000
Subject: [PATCH 2/6] Generator for SVE intrinsics.
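
stdarch-gen2 reads the YAML specs under crates/stdarch-gen2/spec/sve/ and
expands each templated entry into the corresponding Rust SVE intrinsics.
As a rough, illustrative sketch (not the exact generated output), one
expansion of the `svacge[{_n}_{type}]` entry for f32 would have roughly
this shape:

    #[target_feature(enable = "sve")]
    #[cfg_attr(test, assert_instr(facge))]
    pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
        extern "C" {
            // Assumed mangling: the spec's llvm_prefix plus its LLVMLink name.
            #[link_name = "llvm.aarch64.sve.facge.nxv4f32"]
            fn facge(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t;
        }
        unsafe { facge(pg, op1, op2) }
    }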

Co-authored-by: Jamie Cunliffe <Jamie.Cunliffe@arm.com>
Co-authored-by: Adam Gemmell <adam.gemmell@arm.com>
Co-authored-by: Jacob Bramley <jacob.bramley@arm.com>
---
 Cargo.toml                                  |    1 +
 crates/stdarch-gen2/Cargo.toml              |   22 +
 crates/stdarch-gen2/spec/sve/sve.spec.yml   | 4846 +++++++++++++++++++
 crates/stdarch-gen2/spec/sve/sve2.spec.yml  | 2992 ++++++++++++
 crates/stdarch-gen2/src/assert_instr.rs     |  372 ++
 crates/stdarch-gen2/src/context.rs          |  249 +
 crates/stdarch-gen2/src/expression.rs       |  546 +++
 crates/stdarch-gen2/src/input.rs            |  432 ++
 crates/stdarch-gen2/src/intrinsic.rs        | 1498 ++++++
 crates/stdarch-gen2/src/load_store_tests.rs |  818 ++++
 crates/stdarch-gen2/src/main.rs             |  273 ++
 crates/stdarch-gen2/src/matching.rs         |  170 +
 crates/stdarch-gen2/src/predicate_forms.rs  |  249 +
 crates/stdarch-gen2/src/typekinds.rs        | 1024 ++++
 crates/stdarch-gen2/src/wildcards.rs        |  179 +
 crates/stdarch-gen2/src/wildstring.rs       |  353 ++
 16 files changed, 14024 insertions(+)
 create mode 100644 crates/stdarch-gen2/Cargo.toml
 create mode 100644 crates/stdarch-gen2/spec/sve/sve.spec.yml
 create mode 100644 crates/stdarch-gen2/spec/sve/sve2.spec.yml
 create mode 100644 crates/stdarch-gen2/src/assert_instr.rs
 create mode 100644 crates/stdarch-gen2/src/context.rs
 create mode 100644 crates/stdarch-gen2/src/expression.rs
 create mode 100644 crates/stdarch-gen2/src/input.rs
 create mode 100644 crates/stdarch-gen2/src/intrinsic.rs
 create mode 100644 crates/stdarch-gen2/src/load_store_tests.rs
 create mode 100644 crates/stdarch-gen2/src/main.rs
 create mode 100644 crates/stdarch-gen2/src/matching.rs
 create mode 100644 crates/stdarch-gen2/src/predicate_forms.rs
 create mode 100644 crates/stdarch-gen2/src/typekinds.rs
 create mode 100644 crates/stdarch-gen2/src/wildcards.rs
 create mode 100644 crates/stdarch-gen2/src/wildstring.rs

diff --git a/Cargo.toml b/Cargo.toml
index b303f2872f..7843949079 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -5,6 +5,7 @@ members = [
   "crates/core_arch",
   "crates/std_detect",
   "crates/stdarch-gen",
+  "crates/stdarch-gen2",
   "crates/intrinsic-test",
   "examples/"
 ]
diff --git a/crates/stdarch-gen2/Cargo.toml b/crates/stdarch-gen2/Cargo.toml
new file mode 100644
index 0000000000..c9a039ea6b
--- /dev/null
+++ b/crates/stdarch-gen2/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "stdarch-gen2"
+version = "0.1.0"
+authors = ["Luca Vizzarro <luca.vizzarro@arm.com>",
+        "Jamie Cunliffe <Jamie.Cunliffe@arm.com>",
+        "Adam Gemmell <Adam.Gemmell@arm.com",
+        "Jacob Bramley <jacob.bramley@arm.com>"]
+license = "MIT OR Apache-2.0"
+edition = "2021"
+
+# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
+
+[dependencies]
+itertools = "0.10"
+lazy_static = "1.4.0"
+proc-macro2 = "1.0"
+quote = "1.0"
+regex = "1.5"
+serde = { version = "1.0", features = ["derive"] }
+serde_with = "1.14"
+serde_yaml = "0.8"
+walkdir = "2.3.2"
diff --git a/crates/stdarch-gen2/spec/sve/sve.spec.yml b/crates/stdarch-gen2/spec/sve/sve.spec.yml
new file mode 100644
index 0000000000..7c8aed779b
--- /dev/null
+++ b/crates/stdarch-gen2/spec/sve/sve.spec.yml
@@ -0,0 +1,4846 @@
+arch_cfgs:
+  - arch_name: aarch64
+    target_feature: [sve]
+    llvm_prefix: llvm.aarch64.sve
+uses_neon_types: true
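+# Schema summary: each entry below is a template that stdarch-gen2 expands into
+# one Rust intrinsic per element type (or type tuple) listed under `types`.
+# Placeholders such as {type}, {sve_type} and {predicate} are substituted per
+# type; the optional [{_n}] part also generates a scalar-operand variant for the
+# argument named by `n_variant_op`, and an {_mxz} suffix expands into the
+# merging (_m), "don't care" (_x) and zeroing (_z) predication forms, with
+# `zeroing_method` describing how the zeroing form is built. `compose` gives the
+# body: typically an LLVMLink (raw LLVM intrinsic binding) and/or FnCall steps
+# that call other generated intrinsics.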
+intrinsics:
+  - name: svacge[{_n}_{type}]
+    doc: Absolute compare greater than or equal to
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64]
+    assert_instr: [facge]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "facge.{sve_type}" }
+
+  - name: svacgt[{_n}_{type}]
+    doc: Absolute compare greater than
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64]
+    assert_instr: [facgt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "facgt.{sve_type}" }
+
+  - name: svacle[{_n}_{type}]
+    doc: Absolute compare less than or equal to
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64]
+    assert_instr: [facge]
+    n_variant_op: op2
+    compose:
+      - FnCall: ["svacge_{type}", [$pg, $op2, $op1]]
+
+  - name: svaclt[{_n}_{type}]
+    doc: Absolute compare less than
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64]
+    assert_instr: [facgt]
+    n_variant_op: op2
+    compose:
+      - FnCall: ["svacgt_{type}", [$pg, $op2, $op1]]
+
+  - name: svcadd[_{type}]{_mxz}
+    doc: Complex add with rotate
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }]
+    assert_instr: [[fcadd, "IMM_ROTATION = 90"]]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink:
+          name: fcadd.{sve_type}
+          arguments:
+            - "pg: {predicate}"
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $IMM_ROTATION]]
+
+  - name: svcmla[_{type}]{_mxz}
+    doc: Complex multiply-add with rotate
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }]
+    assert_instr: [[fcmla, "IMM_ROTATION = 90"]]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink:
+          name: fcmla.{sve_type}
+          arguments:
+            - "pg: {predicate}"
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$pg, $op1, $op2, $op3, $IMM_ROTATION]]
+
+  - name: svcmla_lane[_{type}]
+    doc: Complex multiply-add with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32]
+    static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"]
+    constraints:
+      - variable: IMM_INDEX
+        range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] }
+      - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }
+    assert_instr: [[fcmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: fcmla.lane.x.{sve_type}
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_index: i32"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]]
+
+  - name: svadd[{_n}_{type}]{_mxz}
+    doc: Add
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.f}add"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}add.{sve_type}" }
+
+  - name: svqsub[{_n}_{type}]
+    doc: Saturating subtract
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.su}qsub"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qsub.x.{sve_type}" }
+
+  - name: svcnt[_{type[0]}]{_mxz}
+    doc: Count nonzero bits
+    arguments:
+      ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    zeroing_method: { drop: inactive }
+    assert_instr: [cnt]
+    compose:
+      - LLVMLink: { name: "cnt.{sve_type[0]}" }
+
+  - name: svcls[_{type[0]}]{_mxz}
+    doc: Count leading sign bits
+    arguments:
+      ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[i8, u8], [i16, u16], [i32, u32], [i64, u64]]
+    zeroing_method: { drop: inactive }
+    assert_instr: [cls]
+    compose:
+      - LLVMLink: { name: "cls.{sve_type[0]}" }
+
+  - name: svclz[_{type[0]}]{_mxz}
+    doc: Count leading zero bits
+    arguments:
+      ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    zeroing_method: { drop: inactive }
+    assert_instr: [clz]
+    compose:
+      - LLVMLink: { name: "clz.{sve_type[0]}" }
+
+  - name: svext{size_literal[1]}[_{type[0]}]{_mxz}
+    substitutions:
+      sign_or_zero:
+        match_kind: "{type[0]}"
+        default: Sign
+        unsigned: Zero
+      kind_literal: { match_kind: "{type[0]}", default: s, unsigned: u }
+    doc: "{sign_or_zero}-extend the low {size[1]} bits"
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    zeroing_method: { drop: inactive }
+    assert_instr: ["{type_kind[0].su}xt{size_literal[1]}"]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}xt{size_literal[1]}.{sve_type[0]}"
+
+  - name: svsqrt[_{type}]{_mxz}
+    doc: Square root
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { drop: inactive }
+    assert_instr: [fsqrt]
+    compose:
+      - LLVMLink: { name: "fsqrt.{sve_type}" }
+
+  - name: svcmpeq[{_n}_{type}]
+    doc: Compare equal to
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmeq, default: cmpeq }]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}cmpeq.{sve_type}" }
+
+  - name: svcmpeq_wide[{_n}_{type[0]}]
+    doc: Compare equal to
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    types:
+      - [[i8, i16, i32], i64]
+    assert_instr: [cmpeq]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "cmpeq.wide.{sve_type[0]}" }
+
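+  # Several of the comparisons below use MatchKind to pick the LLVM binding by
+  # type kind: unsigned element types lower to cmphs/cmphi (and cmpls/cmplo for
+  # the wide less-than forms), while signed types use cmpge/cmpgt and floats the
+  # f-prefixed forms selected by {type_kind.f}. svcmple and svcmplt have no
+  # binding of their own; they swap their operands and reuse svcmpge/svcmpgt.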
+  - name: svcmpge[{_n}_{type}]
+    doc: Compare greater than or equal to
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }]
+    n_variant_op: op2
+    compose:
+      - MatchKind:
+          - "{type}"
+          - default:
+              LLVMLink: { name: "{type_kind.f}cmpge.{sve_type}" }
+            unsigned:
+              LLVMLink: { name: "cmphs.{sve_type}" }
+
+  - name: svcmpge_wide[{_n}_{type[0]}]
+    doc: Compare greater than or equal to
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    n_variant_op: op2
+    types:
+      - [[i8, i16, i32], i64]
+      - [[u8, u16, u32], u64]
+    assert_instr: [{ default: cmpge, unsigned: cmphs }]
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default:
+              LLVMLink: { name: "cmpge.wide.{sve_type[0]}" }
+            unsigned:
+              LLVMLink: { name: "cmphs.wide.{sve_type[0]}" }
+
+  - name: svcmpgt[{_n}_{type}]
+    doc: Compare greater than
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }]
+    n_variant_op: op2
+    compose:
+      - MatchKind:
+          - "{type}"
+          - default:
+              LLVMLink: { name: "{type_kind.f}cmpgt.{sve_type}" }
+            unsigned:
+              LLVMLink: { name: "cmphi.{sve_type}" }
+
+  - name: svcmpgt_wide[{_n}_{type[0]}]
+    doc: Compare greater than
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    types:
+      - [[i8, i16, i32], i64]
+      - [[u8, u16, u32], u64]
+    assert_instr: [{ default: cmpgt, unsigned: cmphi }]
+    n_variant_op: op2
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default:
+              LLVMLink: { name: "cmpgt.wide.{sve_type[0]}" }
+            unsigned:
+              LLVMLink: { name: "cmphi.wide.{sve_type[0]}" }
+
+  - name: svcmple[{_n}_{type}]
+    doc: Compare less than or equal to
+    arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "svbool_t"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmge, default: cmpge, unsigned: cmphs }]
+    n_variant_op: op2
+    compose:
+      - FnCall: ["svcmpge_{type}", [$pg, $op2, $op1]]
+
+  - name: svcmple_wide[{_n}_{type[0]}]
+    doc: Compare less than or equal to
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    types:
+      - [[i8, i16, i32], i64]
+      - [[u8, u16, u32], u64]
+    assert_instr: [{ default: cmple, unsigned: cmpls }]
+    n_variant_op: op2
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default:
+              LLVMLink: { name: "cmple.wide.{sve_type[0]}" }
+            unsigned:
+              LLVMLink: { name: "cmpls.wide.{sve_type[0]}" }
+
+  - name: svcmplt[{_n}_{type}]
+    doc: Compare less than
+    arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "svbool_t"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmgt, default: cmpgt, unsigned: cmphi }]
+    n_variant_op: op2
+    compose:
+      - FnCall: ["svcmpgt_{type}", [$pg, $op2, $op1]]
+
+  - name: svcmplt_wide[{_n}_{type[0]}]
+    doc: Compare less than
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    types:
+      - [[i8, i16, i32], i64]
+      - [[u8, u16, u32], u64]
+    assert_instr: [{ default: cmplt, unsigned: cmplo }]
+    n_variant_op: op2
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default:
+              LLVMLink: { name: "cmplt.wide.{sve_type[0]}" }
+            unsigned:
+              LLVMLink: { name: "cmplo.wide.{sve_type[0]}" }
+
+  - name: svcmpne[{_n}_{type}]
+    doc: Compare not equal to
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [{ float: fcmne, default: cmpne }]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}cmpne.{sve_type}" }
+
+  - name: svcmpne_wide[{_n}_{type[0]}]
+    doc: Compare not equal to
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{predicate[0]}"
+    types: [[[i8, i16, i32], i64]]
+    assert_instr: [cmpne]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "cmpne.wide.{sve_type[0]}" }
+
+  - name: svcmpuo[{_n}_{type}]
+    doc: Compare unordered with
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [f32, f64]
+    assert_instr: [fcmuo]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "fcmpuo.{sve_type}" }
+
+  - name: svcnt{size_literal}
+    doc: Count the number of {size}-bit elements in a vector
+    arguments: []
+    return_type: u64
+    types: [i8, i16, i32, i64]
+    assert_instr:
+      - default: { byte: rdvl, halfword: cnth, default: cntw, doubleword: cntd }
+    compose:
+      - FnCall: ["svcnt{size_literal}_pat", [], ["{{ svpattern::SV_ALL }}"]]
+
+  - name: svcnt{size_literal}_pat
+    doc: Count the number of {size}-bit elements in a vector
+    arguments: []
+    static_defs: ["const PATTERN: svpattern"]
+    return_type: u64
+    assert_instr:
+      - [rdvl, "PATTERN = {{ svpattern::SV_ALL }}"]
+      - ["cnt{size_literal}", "PATTERN = {{ svpattern::SV_MUL4 }}"]
+    types: [i8]
+    compose:
+      - LLVMLink:
+          name: cnt{size_literal}
+          arguments: ["pattern: svpattern"]
+      - FnCall: ["{llvm_link}", [$PATTERN]]
+
+  - name: svcnt{size_literal}_pat
+    doc: Count the number of {size}-bit elements in a vector
+    arguments: []
+    static_defs: ["const PATTERN: svpattern"]
+    return_type: u64
+    assert_instr: [["cnt{size_literal}", "PATTERN = {{ svpattern::SV_ALL }}"]]
+    types: [i16, i32, i64]
+    compose:
+      - LLVMLink:
+          name: cnt{size_literal}
+          arguments: ["pattern: svpattern"]
+      - FnCall: ["{llvm_link}", [$PATTERN]]
+
+  - name: svlen[_{type}]
+    doc: Count the number of elements in a full vector
+    arguments: ["_op: {sve_type}"]
+    return_type: "u64"
+    types: [i8, u8, i16, u16, i32, u32, f32, i64, u64, f64]
+    assert_instr: [{ default: { default: "cnt{size_literal}", byte: rdvl } }]
+    compose:
+      - FnCall: ["svcnt{size_literal}", []]
+
+  - name: svdup[_n]_{type}
+    doc: Broadcast a scalar value
+    arguments: ["op: {type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [mov]
+    compose:
+      - LLVMLink: { name: "dup.x.{sve_type}" }
+
+  - name: svdup[_n]_{type}{_mxz}
+    doc: Broadcast a scalar value
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { drop: inactive }
+    assert_instr: [mov]
+    compose:
+      - LLVMLink: { name: "dup.{sve_type}" }
+
+  - name: svdup[_n]_{type}
+    doc: Broadcast a scalar value
+    arguments: ["op: bool"]
+    return_type: "{predicate}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [sbfx, whilelo]
+    compose:
+      - LLVMLink: { name: "dup.x.{sve_type}" }
+
+  - name: svdup_lane[_{type[0]}]
+    doc: Broadcast a scalar value
+    arguments: ["data: {sve_type[0]}", "index: {type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    assert_instr: [tbl]
+    compose:
+      - FnCall:
+          - svtbl_{type[0]}
+          - - $data
+            - FnCall: ["svdup_n_{type[1]}", [$index]]
+
+  - name: svdupq_lane[_{type}]
+    doc: Broadcast a quadword of scalars
+    arguments: ["data: {sve_type}", "index: u64"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [tbl]
+    compose:
+      - LLVMLink: { name: "dupq.lane.{sve_type}" }
+
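+  # The svdupq[_n] forms below have no direct LLVM binding: they transmute the
+  # scalar arguments into a fixed-length NEON vector, insert it at index 0 of an
+  # undefined scalable vector via llvm.experimental.vector.insert, and replicate
+  # that quadword with svdupq_lane. The bool variants instead build an integer
+  # quadword from the flags and convert it to a predicate with a
+  # compare-not-equal against zero.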
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    arguments:
+      - "x0: {type}"
+      - "x1: {type}"
+      - "x2: {type}"
+      - "x3: {type}"
+      - "x4: {type}"
+      - "x5: {type}"
+      - "x6: {type}"
+      - "x7: {type}"
+      - "x8: {type}"
+      - "x9: {type}"
+      - "x10: {type}"
+      - "x11: {type}"
+      - "x12: {type}"
+      - "x13: {type}"
+      - "x14: {type}"
+      - "x15: {type}"
+    return_type: "{sve_type}"
+    types: [i8, u8]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: llvm.experimental.vector.insert.{sve_type}.{neon_type}
+          arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"]
+      - Let:
+          - op
+          - FnCall:
+              - "{llvm_link}"
+              - - SvUndef
+                - FnCall:
+                    - "crate::mem::transmute"
+                    - - - $x0
+                        - $x1
+                        - $x2
+                        - $x3
+                        - $x4
+                        - $x5
+                        - $x6
+                        - $x7
+                        - $x8
+                        - $x9
+                        - $x10
+                        - $x11
+                        - $x12
+                        - $x13
+                        - $x14
+                        - $x15
+                - 0
+      - FnCall: ["svdupq_lane_{type}", [$op, 0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    types: [b8]
+    arguments:
+      - "x0: bool"
+      - "x1: bool"
+      - "x2: bool"
+      - "x3: bool"
+      - "x4: bool"
+      - "x5: bool"
+      - "x6: bool"
+      - "x7: bool"
+      - "x8: bool"
+      - "x9: bool"
+      - "x10: bool"
+      - "x11: bool"
+      - "x12: bool"
+      - "x13: bool"
+      - "x14: bool"
+      - "x15: bool"
+    return_type: "svbool_t"
+    assert_instr: []
+    compose:
+      - Let:
+          - op1
+          - FnCall:
+              - svdupq_n_s8
+              - - CastAs: [$x0, i8]
+                - CastAs: [$x1, i8]
+                - CastAs: [$x2, i8]
+                - CastAs: [$x3, i8]
+                - CastAs: [$x4, i8]
+                - CastAs: [$x5, i8]
+                - CastAs: [$x6, i8]
+                - CastAs: [$x7, i8]
+                - CastAs: [$x8, i8]
+                - CastAs: [$x9, i8]
+                - CastAs: [$x10, i8]
+                - CastAs: [$x11, i8]
+                - CastAs: [$x12, i8]
+                - CastAs: [$x13, i8]
+                - CastAs: [$x14, i8]
+                - CastAs: [$x15, i8]
+      - FnCall:
+          - svcmpne_wide_s8
+          - - FnCall: [svptrue_b8, []]
+            - $op1
+            - FnCall: [svdup_n_s64, [0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    arguments:
+      - "x0: {type}"
+      - "x1: {type}"
+      - "x2: {type}"
+      - "x3: {type}"
+      - "x4: {type}"
+      - "x5: {type}"
+      - "x6: {type}"
+      - "x7: {type}"
+    return_type: "{sve_type}"
+    types: [i16, u16]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: llvm.experimental.vector.insert.{sve_type}.{neon_type}
+          arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"]
+      - Let:
+          - op
+          - FnCall:
+              - "{llvm_link}"
+              - - SvUndef
+                - FnCall:
+                    - "crate::mem::transmute"
+                    - - [$x0, $x1, $x2, $x3, $x4, $x5, $x6, $x7]
+                - 0
+      - FnCall: ["svdupq_lane_{type}", [$op, 0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    types: [b16]
+    arguments:
+      - "x0: bool"
+      - "x1: bool"
+      - "x2: bool"
+      - "x3: bool"
+      - "x4: bool"
+      - "x5: bool"
+      - "x6: bool"
+      - "x7: bool"
+    return_type: svbool_t
+    assert_instr: []
+    compose:
+      - Let:
+          - op1
+          - FnCall:
+              - svdupq_n_s16
+              - - CastAs: [$x0, i16]
+                - CastAs: [$x1, i16]
+                - CastAs: [$x2, i16]
+                - CastAs: [$x3, i16]
+                - CastAs: [$x4, i16]
+                - CastAs: [$x5, i16]
+                - CastAs: [$x6, i16]
+                - CastAs: [$x7, i16]
+      - FnCall:
+          - svcmpne_wide_s16
+          - - FnCall: [svptrue_b16, []]
+            - $op1
+            - FnCall: [svdup_n_s64, [0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    arguments: ["x0: {type}", "x1: {type}", "x2: {type}", "x3: {type}"]
+    return_type: "{sve_type}"
+    types: [f32, i32, u32]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: llvm.experimental.vector.insert.{sve_type}.{neon_type}
+          arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"]
+      - Let:
+          - op
+          - FnCall:
+              - "{llvm_link}"
+              - - SvUndef
+                - FnCall: ["crate::mem::transmute", [[$x0, $x1, $x2, $x3]]]
+                - 0
+      - FnCall: ["svdupq_lane_{type}", [$op, 0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    types: [b32]
+    arguments: ["x0: bool", "x1: bool", "x2: bool", "x3: bool"]
+    return_type: "svbool_t"
+    assert_instr: []
+    compose:
+      - Let:
+          - op1
+          - FnCall:
+              - svdupq_n_s32
+              - - CastAs: [$x0, i32]
+                - CastAs: [$x1, i32]
+                - CastAs: [$x2, i32]
+                - CastAs: [$x3, i32]
+      - FnCall:
+          - svcmpne_wide_s32
+          - - FnCall: [svptrue_b32, []]
+            - $op1
+            - FnCall: [svdup_n_s64, [0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    arguments: ["x0: {type}", "x1: {type}"]
+    return_type: "{sve_type}"
+    types: [f64, i64, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: llvm.experimental.vector.insert.{sve_type}.{neon_type}
+          arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"]
+      - Let:
+          - op
+          - FnCall:
+              - "{llvm_link}"
+              - - SvUndef
+                - FnCall: ["crate::mem::transmute", [[$x0, $x1]]]
+                - 0
+      - FnCall: ["svdupq_lane_{type}", [$op, 0]]
+
+  - name: svdupq[_n]_{type}
+    doc: Broadcast a quadword of scalars
+    types: [b64]
+    arguments: ["x0: bool", "x1: bool"]
+    return_type: "svbool_t"
+    assert_instr: []
+    compose:
+      - Let:
+          - op1
+          - FnCall: [svdupq_n_s64, [CastAs: [$x0, i64], CastAs: [$x1, i64]]]
+      - FnCall:
+          - svcmpne_s64
+          - - FnCall: [svptrue_b64, []]
+            - $op1
+            - FnCall: [svdup_n_s64, [0]]
+
+  - name: svcreate2[_{type}]
+    doc: Create a tuple of two vectors
+    arguments: ["x0: {sve_type}", "x1: {sve_type}"]
+    return_type: "{sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink: { name: "tuple.create2.{sve_type_x2}.{sve_type}" }
+
+  - name: svcreate3[_{type}]
+    doc: Create a tuple of three vectors
+    arguments: ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}"]
+    return_type: "{sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink: { name: "tuple.create3.{sve_type_x3}.{sve_type}" }
+
+  - name: svcreate4[_{type}]
+    doc: Create a tuple of four vectors
+    arguments:
+      ["x0: {sve_type}", "x1: {sve_type}", "x2: {sve_type}", "x3: {sve_type}"]
+    return_type: "{sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink: { name: "tuple.create4.{sve_type_x4}.{sve_type}" }
+
+  - name: svundef_{type}
+    safety:
+      unsafe: [uninitialized]
+    doc: Create an uninitialized vector
+    arguments: []
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - SvUndef
+
+  - name: svundef2_{type}
+    safety:
+      unsafe: [uninitialized]
+    doc: Create an uninitialized tuple of two vectors
+    arguments: []
+    return_type: "{sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - SvUndef
+
+  - name: svundef3_{type}
+    safety:
+      unsafe: [uninitialized]
+    doc: Create an uninitialized tuple of three vectors
+    arguments: []
+    return_type: "{sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - SvUndef
+
+  - name: svundef4_{type}
+    safety:
+      unsafe: [uninitialized]
+    doc: Create an uninitialized tuple of four vectors
+    arguments: []
+    return_type: "{sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - SvUndef
+
+  - name: svindex_{type}
+    doc: Create linear series
+    arguments: ["base: {type}", "step: {type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [index]
+    compose:
+      - LLVMLink: { name: "index.{sve_type}" }
+
+  - name: svget2[_{type}]
+    doc: Extract one vector from a tuple of two vectors
+    arguments: ["tuple: {sve_type_x2}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 1] }]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: tuple.get.{sve_type}.{sve_type_x2}
+          arguments: ["tuple: {sve_type_x2}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX]]
+
+  - name: svget3[_{type}]
+    doc: Extract one vector from a tuple of three vectors
+    arguments: ["tuple: {sve_type_x3}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 2] }]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: tuple.get.{sve_type}.{sve_type_x3}
+          arguments: ["tuple: {sve_type_x3}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX]]
+
+  - name: svget4[_{type}]
+    doc: Extract one vector from a tuple of four vectors
+    arguments: ["tuple: {sve_type_x4}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 3] }]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: tuple.get.{sve_type}.{sve_type_x4}
+          arguments: ["tuple: {sve_type_x4}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX]]
+
+  - name: svset2[_{type}]
+    doc: Change one vector in a tuple of two vectors
+    arguments: ["tuple: {sve_type_x2}", "x: {sve_type}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 1] }]
+    return_type: "{sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: tuple.set.{sve_type_x2}.{sve_type}
+          arguments: ["tuple: {sve_type_x2}", "imm_index: i32", "x: {sve_type}"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX, $x]]
+
+  - name: svset3[_{type}]
+    doc: Change one vector in a tuple of three vectors
+    arguments: ["tuple: {sve_type_x3}", "x: {sve_type}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 2] }]
+    return_type: "{sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: tuple.set.{sve_type_x3}.{sve_type}
+          arguments: ["tuple: {sve_type_x3}", "imm_index: i32", "x: {sve_type}"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX, $x]]
+
+  - name: svset4[_{type}]
+    doc: Change one vector in a tuple of four vectors
+    arguments: ["tuple: {sve_type_x4}", "x: {sve_type}"]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, range: [0, 3] }]
+    return_type: "{sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: []
+    compose:
+      - LLVMLink:
+          name: "tuple.set.{sve_type_x4}.{sve_type}"
+          arguments: ["tuple: {sve_type_x4}", "imm_index: i32", "x: {sve_type}"]
+      - FnCall: ["{llvm_link}", [$tuple, $IMM_INDEX, $x]]
+
+  - name: svzip1[_{type}]
+    doc: Interleave elements from low halves of two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [zip1]
+    compose:
+      - LLVMLink: { name: "zip1.{sve_type}" }
+
+  - name: svzip1_{type}
+    doc: Interleave elements from low halves of two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [zip1]
+    compose:
+      - LLVMLink: { name: "zip1.{sve_type}" }
+
+  - name: svzip1q[_{type}]
+    doc: Interleave quadwords from low halves of two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [zip1]
+    compose:
+      - LLVMLink: { name: "zip1q.{sve_type}" }
+
+  - name: svzip2[_{type}]
+    doc: Interleave elements from high halves of two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [zip2]
+    compose:
+      - LLVMLink: { name: "zip2.{sve_type}" }
+
+  - name: svzip2_{type}
+    doc: Interleave elements from high halves of two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [zip2]
+    compose:
+      - LLVMLink: { name: "zip2.{sve_type}" }
+
+  - name: svzip2q[_{type}]
+    doc: Interleave quadwords from high halves of two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [zip2]
+    compose:
+      - LLVMLink: { name: "zip2q.{sve_type}" }
+
+  - name: svuzp1[_{type}]
+    doc: Concatenate even elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [uzp1]
+    compose:
+      - LLVMLink: { name: "uzp1.{sve_type}" }
+
+  - name: svuzp1_{type}
+    doc: Concatenate even elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [uzp1]
+    compose:
+      - LLVMLink: { name: "uzp1.{sve_type}" }
+
+  - name: svuzp1q[_{type}]
+    doc: Concatenate even quadwords from two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [uzp1]
+    compose:
+      - LLVMLink: { name: "uzp1q.{sve_type}" }
+
+  - name: svuzp2[_{type}]
+    doc: Concatenate odd elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [uzp2]
+    compose:
+      - LLVMLink: { name: "uzp2.{sve_type}" }
+
+  - name: svuzp2_{type}
+    doc: Concatenate odd elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [uzp2]
+    compose:
+      - LLVMLink: { name: "uzp2.{sve_type}" }
+
+  - name: svuzp2q[_{type}]
+    doc: Concatenate odd quadwords from two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [uzp2]
+    compose:
+      - LLVMLink: { name: "uzp2q.{sve_type}" }
+
+  - name: svtrn1[_{type}]
+    doc: Interleave even elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [trn1]
+    compose:
+      - LLVMLink: { name: "trn1.{sve_type}" }
+
+  - name: svtrn1_{type}
+    doc: Interleave even elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [trn1]
+    compose:
+      - LLVMLink: { name: "trn1.{sve_type}" }
+
+  - name: svtrn1q[_{type}]
+    doc: Interleave even quadwords from two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [trn1]
+    compose:
+      - LLVMLink: { name: "trn1q.{sve_type}" }
+
+  - name: svtrn2[_{type}]
+    doc: Interleave odd elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [trn2]
+    compose:
+      - LLVMLink: { name: "trn2.{sve_type}" }
+
+  - name: svtrn2_{type}
+    doc: Interleave odd elements from two inputs
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [trn2]
+    compose:
+      - LLVMLink: { name: "trn2.{sve_type}" }
+
+  - name: svtrn2q[_{type}]
+    doc: Interleave odd quadwords from two inputs
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [trn2]
+    compose:
+      - LLVMLink: { name: "trn2q.{sve_type}" }
+
+  - name: svrev[_{type}]
+    doc: Reverse all elements
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [rev]
+    compose:
+      - LLVMLink: { name: "rev.{sve_type}" }
+
+  - name: svrev_{type}
+    doc: Reverse all elements
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [rev]
+    compose:
+      - LLVMLink: { name: "rev.{sve_type}" }
+
+  - name: svrevb[_{type}]{_mxz}
+    doc: Reverse bytes within elements
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i16, i32, i64, u16, u32, u64]
+    zeroing_method: { drop: "inactive" }
+    assert_instr: [revb]
+    compose:
+      - LLVMLink: { name: "revb.{sve_type}" }
+
+  - name: svrevh[_{type}]{_mxz}
+    doc: Reverse halfwords within elements
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i32, i64, u32, u64]
+    zeroing_method: { drop: "inactive" }
+    assert_instr: [revh]
+    compose:
+      - LLVMLink: { name: "revh.{sve_type}" }
+
+  - name: svrevw[_{type}]{_mxz}
+    doc: Reverse words within elements
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i64, u64]
+    zeroing_method: { drop: "inactive" }
+    assert_instr: [revw]
+    compose:
+      - LLVMLink: { name: "revw.{sve_type}" }
+
+  - name: svrbit[_{type}]{_mxz}
+    doc: Reverse bits
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { drop: "inactive" }
+    assert_instr: [rbit]
+    compose:
+      - LLVMLink: { name: "rbit.{sve_type}" }
+
+  - name: svext[_{type}]
+    doc: Extract vector from pair of vectors
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, sve_max_elems_type: "{type}" }]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [[ext, "IMM3 = 1"]]
+    compose:
+      - LLVMLink:
+          name: ext.{sve_type}
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
+
+  - name: svsplice[_{type}]
+    doc: Splice two vectors under predicate control
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [splice]
+    compose:
+      - LLVMLink: { name: "splice.{sve_type}" }
+
+  - name: svinsr[_n_{type}]
+    doc: Insert scalar into shifted vector
+    arguments: ["op1: {sve_type}", "op2: {type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [insr]
+    compose:
+      - LLVMLink: { name: "insr.{sve_type}" }
+
+  - name: svld1[_{type}]
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld1.{sve_type}" }
+
+  - name: svld1_vnum[_{type}]
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svld1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
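+    # Sketch of the generated body (illustrative, f32 shown): the vnum form
+    # rescales the pointer by whole vectors and delegates to the plain load,
+    # roughly
+    #   svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize))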
+
+  - name: svld1_gather_[{type[0]}]index[_{type[1]}]
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ld1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.index.{sve_type[1]}"
+
+  - name: svld1_gather_[{type[0]}]offset[_{type[1]}]
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ld1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1]}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.{sve_type[1]}"
+
+  - name: svld1_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ld1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ld1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}"
+
+  - name: svld1_gather[_{type[0]}base]_{type[1]}
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ld1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svld1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svld1_gather[_{type[0]}base]_index_{type[1]}
+    doc: Unextended load
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ld1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svld1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]]
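+    # Sketch (illustrative): the index form scales the element index into a
+    # byte offset before delegating; size_in_bytes_log2 of u32 is 2, so the
+    # f32 row reads roughly
+    #   svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))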
+
+  - name: svld1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [i32, u32], i16]
+      - [[i64, u64], [i64, u64], [i16, i32]]
+    assert_instr: ["ld1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+
+  - name: svld1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [u32, i32], u16]
+      - [[i64, u64], [u64, i64], [u16, u32]]
+    assert_instr: ["ld1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svld1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [i32, u32], [i8, i16]]
+      - [[i64, u64], [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ld1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+
+  - name: svld1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [u32, i32], [u8, u16]]
+      - [[i64, u64], [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ld1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ld1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ld1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ld1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+
+  - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [u32, i32], [u8, u16]]
+      - [u64, [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ld1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ld1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ld1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [u8, u16]]
+      - [u64, [i64, u64], [u8, u16, u32]]
+    assert_instr: ["ld1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svld1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], i16]
+      - [u64, [i64, u64], [i16, i32]]
+    assert_instr: ["ld1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svld1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svld1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], u16]
+      - [u64, [i64, u64], [u16, u32]]
+    assert_instr: ["ld1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svld1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svldnt1[_{type}]
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldnt1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ldnt1.{sve_type}" }
+
+  - name: svldnt1_vnum[_{type}]
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldnt1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svldnt1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld1s{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ld1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ld1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+
+  - name: svld1u{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ld1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ld1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+          - [Type: "{sve_type[0] as {type[1]}}", _]
+
+  - name: svld1s{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ld1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svld1s{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld1u{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ld1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svld1u{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld2[_{type}]
+    doc: Load two-element tuples into two vectors
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld2{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld2.{sve_type_x2}.{predicate}" }
+
+  - name: svld2_vnum[_{type}]
+    doc: Load two-element tuples into two vectors
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld2{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svld2_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld3[_{type}]
+    doc: Load three-element tuples into three vectors
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld3{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld3.{sve_type_x3}.{predicate}" }
+
+  - name: svld3_vnum[_{type}]
+    doc: Load three-element tuples into three vectors
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld3{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svld3_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld4[_{type}]
+    doc: Load four-element tuples into four vectors
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld4{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld4.{sve_type_x4}.{predicate}" }
+
+  - name: svld4_vnum[_{type}]
+    doc: Load four-element tuples into four vectors
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld4{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svld4_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svld1rq[_{type}]
+    doc: Load and replicate 128 bits of data
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld1rq{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld1rq.{sve_type}" }
+
+  - name: svld1ro[_{type}]
+    doc: Load and replicate 256 bits of data
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    target_features: [f64mm]
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ld1ro{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ld1ro.{sve_type}" }
+
+  - name: svldnf1[_{type}]
+    doc: Unextended load, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldnf1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ldnf1.{sve_type}" }
+
+  - name: svldnf1_vnum[_{type}]
+    doc: Unextended load, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldnf1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svldnf1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldnf1s{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ldnf1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldnf1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+
+  - name: svldnf1u{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ldnf1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldnf1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+          - [Type: "{sve_type[0] as {type[1]}}", _]
+
+  - name: svldnf1s{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ldnf1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldnf1s{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldnf1u{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend, non-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_non_faulting
+        - dereference: predicated_non_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ldnf1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldnf1u{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldff1[_{type}]
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate}", "base: *{type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldff1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink: { name: "ldff1.{sve_type}" }
+
+  - name: svldff1_vnum[_{type}]
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate}", "base: *{type}", "vnum: i64"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["ldff1{size_literal}"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svldff1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldff1s{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ldff1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldff1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+
+  - name: svldff1u{size_literal[1]}_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ldff1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldff1.{sve_type[0] as {type[1]}}"
+          arguments: ["pg: {predicate[0]}", "base: *{type[1]}"]
+          return_type: "{sve_type[0] as {type[1]}}"
+      - FnCall:
+          - "simd_cast"
+          - - FnCall: ["{llvm_link}", [$pg, $base]]
+          - [Type: "{sve_type[0] as {type[1]}}", _]
+
+  - name: svldff1s{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], i8]
+      - [[i32, i64, u32, u64], i16]
+      - [[i64, u64], i32]
+    assert_instr: ["ldff1s{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldff1s{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldff1u{size_literal[1]}_vnum_{type[0]}
+    doc: Load {size[1]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments: ["pg: {predicate[0]}", "base: *{type[1]}", "vnum: i64"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i16, i32, i64, u16, u32, u64], u8]
+      - [[i32, i64, u32, u64], u16]
+      - [[i64, u64], u32]
+    assert_instr: ["ldff1{size_literal[1]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldff1u{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+
+  - name: svldff1_gather_[{type[0]}]index[_{type[1]}]
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ldff1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1]}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.index.{sve_type[1]}"
+
+  - name: svldff1_gather_[{type[0]}]offset[_{type[1]}]
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ldff1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1]}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.{sve_type[1]}"
+
+  - name: svldff1_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldff1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldff1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}"
+
+  - name: svldff1_gather[_{type[0]}base]_{type[1]}
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldff1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldff1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldff1_gather[_{type[0]}base]_index_{type[1]}
+    doc: Unextended load, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldff1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldff1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]]
+
+  - name: svldff1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [i32, u32], i16]
+      - [[i64, u64], [i64, u64], [i16, i32]]
+    assert_instr: ["ldff1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+
+  - name: svldff1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [u32, i32], u16]
+      - [[i64, u64], [u64, i64], [u16, u32]]
+    assert_instr: ["ldff1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.index.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldff1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [i32, u32], [i8, i16]]
+      - [[i64, u64], [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldff1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+
+  - name: svldff1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i32, u32], [u32, i32], [u8, u16]]
+      - [[i64, u64], [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ldff1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldff1.gather.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldff1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldff1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+
+  - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [u32, i32], [u8, u16]]
+      - [u64, [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ldff1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ldff1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldff1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [u8, u16]]
+      - [u64, [i64, u64], [u8, u16, u32]]
+    assert_instr: ["ldff1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldff1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], i16]
+      - [u64, [i64, u64], [i16, i32]]
+    assert_instr: ["ldff1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldff1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svldff1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, first-faulting
+    safety:
+      unsafe:
+        - pointer_offset: predicated_first_faulting
+        - dereference: predicated_first_faulting
+        - unpredictable_on_fault
+        - no_provenance: bases
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], u16]
+      - [u64, [i64, u64], [u16, u32]]
+    assert_instr: ["ldff1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldff1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svrdffr_z
+    doc: Read FFR, returning predicate of successfully loaded elements
+    arguments: ["pg: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [rdffr]
+    compose:
+      - LLVMLink: { name: "rdffr.z" }
+
+  - name: svrdffr
+    doc: Read FFR, returning predicate of successfully loaded elements
+    arguments: []
+    return_type: svbool_t
+    assert_instr: [rdffr]
+    compose:
+      - FnCall: [svrdffr_z, [FnCall: [svptrue_b8, []]]]
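+    # i.e. roughly `svrdffr_z(svptrue_b8())`: the unpredicated form reads the
+    # FFR through an all-true predicate.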
+
+  - name: svsetffr
+    doc: Initialize the first-fault register to all-true
+    arguments: []
+    assert_instr: [setffr]
+    compose:
+      - LLVMLink: { name: "setffr" }
+
+  - name: svwrffr
+    doc: Write to the first-fault register
+    arguments: ["op: svbool_t"]
+    assert_instr: [wrffr]
+    compose:
+      - LLVMLink: { name: "wrffr" }
+
+  - name: svqinc{size_literal[1]}[_n_{type[0]}]
+    substitutions:
+      textual_size:
+        match_size: "{type[1]}"
+        default: word
+        byte: byte
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating increment by number of {textual_size} elements
+    arguments: ["op: {type[0]}"]
+    static_defs: ["const IMM_FACTOR: i32"]
+    return_type: "{type[0]}"
+    types:
+      - [[i32, i64, u32, u64], [i8, i16, i32, i64]]
+    assert_instr:
+      - ["{type_kind[0].su}qinc{size_literal[1]}", "IMM_FACTOR = 1"]
+    compose:
+      - FnCall:
+          - "svqinc{size_literal[1]}_pat_n_{type[0]}"
+          - [$op]
+          - ["{{svpattern::SV_ALL}}", $IMM_FACTOR]
+
+  - name: svqinc{size_literal[1]}_pat[_n_{type[0]}]
+    substitutions:
+      textual_size:
+        match_size: "{type[1]}"
+        default: word
+        byte: byte
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating increment by number of {textual_size} elements
+    arguments: ["op: {type[0]}"]
+    static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"]
+    constraints: [{ variable: IMM_FACTOR, range: [1, 16] }]
+    return_type: "{type[0]}"
+    types:
+      - [[i32, i64, u32, u64], [i8, i16, i32, i64]]
+    assert_instr:
+      - - "{type_kind[0].su}qinc{size_literal[1]}"
+        - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1"
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qinc{size_literal[1]}.n{size[0]}"
+          arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"]
+          return_type: "{type[0]}"
+      - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]]
+
+  - name: svqinc{size_literal}[_{type}]
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: word
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating increment by number of {textual_size} elements
+    arguments: ["op: {sve_type}"]
+    static_defs: ["const IMM_FACTOR: i32"]
+    return_type: "{sve_type}"
+    types: [i16, u16, i32, u32, i64, u64]
+    assert_instr: [["{type_kind.su}qinc{size_literal}", "IMM_FACTOR = 1"]]
+    compose:
+      - FnCall:
+          - "svqinc{size_literal}_pat_{type}"
+          - [$op]
+          - ["{{svpattern::SV_ALL}}", $IMM_FACTOR]
+
+  - name: svqinc{size_literal}_pat[_{type}]
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: word
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating increment by number of {textual_size} elements
+    arguments: ["op: {sve_type}"]
+    static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"]
+    constraints: [{ variable: IMM_FACTOR, range: [1, 16] }]
+    return_type: "{sve_type}"
+    types: [i16, u16, i32, u32, i64, u64]
+    assert_instr:
+      - - "{type_kind.su}qinc{size_literal}"
+        - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1"
+    compose:
+      - LLVMLink:
+          name: "{type_kind.su}qinc{size_literal}.{sve_type}"
+          arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"]
+          return_type: "{sve_type}"
+      - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]]
+
+  - name: svqdec{size_literal[1]}[_n_{type[0]}]
+    substitutions:
+      textual_size:
+        match_size: "{type[1]}"
+        default: word
+        byte: byte
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating decrement by number of {textual_size} elements
+    arguments: ["op: {type[0]}"]
+    static_defs: ["const IMM_FACTOR: i32"]
+    return_type: "{type[0]}"
+    types:
+      - [[i32, i64, u32, u64], [i8, i16, i32, i64]]
+    assert_instr:
+      - ["{type_kind[0].su}qdec{size_literal[1]}", "IMM_FACTOR = 1"]
+    compose:
+      - FnCall:
+          - "svqdec{size_literal[1]}_pat_n_{type[0]}"
+          - [$op]
+          - ["{{svpattern::SV_ALL}}", $IMM_FACTOR]
+
+  - name: svqdec{size_literal[1]}_pat[_n_{type[0]}]
+    substitutions:
+      textual_size:
+        match_size: "{type[1]}"
+        default: word
+        byte: byte
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating decrement by number of {textual_size} elements
+    arguments: ["op: {type[0]}"]
+    static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"]
+    constraints: [{ variable: IMM_FACTOR, range: [1, 16] }]
+    return_type: "{type[0]}"
+    types:
+      - [[i32, i64, u32, u64], [i8, i16, i32, i64]]
+    assert_instr:
+      - - "{type_kind[0].su}qdec{size_literal[1]}"
+        - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1"
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qdec{size_literal[1]}.n{size[0]}"
+          arguments: ["op: {type[0]}", "pattern: svpattern", "imm_factor: i32"]
+          return_type: "{type[0]}"
+      - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]]
+
+  - name: svqdec{size_literal}[_{type}]
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: word
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating decrement by number of {textual_size} elements
+    arguments: ["op: {sve_type}"]
+    static_defs: ["const IMM_FACTOR: i32"]
+    return_type: "{sve_type}"
+    types: [i16, u16, i32, u32, i64, u64]
+    assert_instr: [["{type_kind.su}qdec{size_literal}", "IMM_FACTOR = 1"]]
+    compose:
+      - FnCall:
+          - "svqdec{size_literal}_pat_{type}"
+          - [$op]
+          - ["{{svpattern::SV_ALL}}", $IMM_FACTOR]
+
+  - name: svqdec{size_literal}_pat[_{type}]
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: word
+        halfword: halfword
+        doubleword: doubleword
+    doc: Saturating decrement by number of {textual_size} elements
+    arguments: ["op: {sve_type}"]
+    static_defs: ["const PATTERN: svpattern", "const IMM_FACTOR: i32"]
+    constraints: [{ variable: IMM_FACTOR, range: [1, 16] }]
+    return_type: "{sve_type}"
+    types: [i16, u16, i32, u32, i64, u64]
+    assert_instr:
+      - - "{type_kind.su}qdec{size_literal}"
+        - "PATTERN = {{svpattern::SV_ALL}}, IMM_FACTOR = 1"
+    compose:
+      - LLVMLink:
+          name: "{type_kind.su}qdec{size_literal}.{sve_type}"
+          arguments: ["op: {sve_type}", "pattern: svpattern", "imm_factor: i32"]
+          return_type: "{sve_type}"
+      - FnCall: ["{llvm_link}", [$op, $PATTERN, $IMM_FACTOR]]
+
+  - name: svst1[_{type}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st1{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - LLVMLink:
+          name: "st1.{sve_type}"
+          arguments:
+            - "data: {sve_type}"
+            - "pg: {predicate}"
+            - "ptr: *mut {type}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base]]
+
+  - name: svst1_scatter_[{type[0]}]index[_{type[1]}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[1]}"
+      - "indices: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["st1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "indices: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "st1.scatter.index.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "indices: {sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]]
+
+  - name: svst1_scatter_[{type[0]}]offset[_{type[1]}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[1]}"
+      - "offsets: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i32, u32], [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["st1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "offsets: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "st1.scatter.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "offsets: {sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]]
+
+  - name: svst1_scatter[_{type[0]}base]_offset[_{type[1]}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "offset: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["st1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - LLVMLink:
+          arguments:
+            - "data: {sve_type[1]}"
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "offset: i64"
+          name: "st1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]]
+
+  - name: svst1_scatter[_{type[0]}base_{type[1]}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"]
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["st1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - FnCall:
+          - "svst1_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+            - $data
+
+  - name: svst1_scatter[_{type[0]}base]_index[_{type[1]}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "index: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["st1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - FnCall:
+          - "svst1_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]]
+            - $data
+
+  - name: svst1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[2]}"
+      - "indices: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i32, u32], i32, i16]
+      - [[i32, u32], u32, u16]
+      - [[i64, u64], i64, [i16, i32]]
+      - [[i64, u64], u64, [u16, u32]]
+    assert_instr: ["st1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "st1.scatter.{type_kind[0].su}xtw.index.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "indices: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "st1.scatter.index.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "indices: {sve_type[0]}"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $base, $indices]
+
+  - name: svst1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[2]}"
+      - "offsets: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i32, u32], i32, [i8, i16]]
+      - [[i32, u32], u32, [u8, u16]]
+      - [[i64, u64], i64, [i8, i16, i32]]
+      - [[i64, u64], u64, [u8, u16, u32]]
+    assert_instr: ["st1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "st1.scatter.{type_kind[0].su}xtw.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "offsets: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "st1.scatter.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "offsets: {sve_type[0]}"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $base, $offsets]
+
+  - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "offset: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["st1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - LLVMLink:
+          name: "st1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          arguments:
+            - "data: {sve_type[1] as {type[2]}}"
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "offset: i64"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $bases, $offset]
+
+  - name: svst1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}]
+    doc: Truncate to {size[2]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"]
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["st1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - FnCall:
+          - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+            - $data
+
+  - name: svst1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "index: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [i32, u32], i16]
+      - [u64, [i64, u64], [i16, i32]]
+    assert_instr: ["st1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - FnCall:
+          - "svst1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+            - $data
+
+  - name: svstnt1[_{type}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["stnt1{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - LLVMLink:
+          name: "stnt1.{sve_type}"
+          arguments:
+            - "data: {sve_type}"
+            - "pg: {predicate}"
+            - "ptr: *mut {type}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base]]
+
+  - name: svstnt1_vnum[_{type}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["stnt1{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - FnCall:
+          - "svstnt1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
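+      # vnum counts whole vectors: the base pointer is advanced by
+      # svcnt{size_literal}() elements (one vector's worth) per unit of vnum.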
+
+  - name: svst1{size_literal[1]}[_{type[0]}]
+    doc: Truncate to {size[1]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate[0]}", "base: *mut {type[1]}", "data: {sve_type[0]}"]
+    types:
+      - [[i16, i32, i64], i8]
+      - [[u16, u32, u64], u8]
+      - [[i32, i64], i16]
+      - [[u32, u64], u16]
+      - [i64, i32]
+      - [u64, u32]
+    assert_instr: ["st1{size_literal[1]}"]
+    test: { store: 1 }
+    compose:
+      - LLVMLink:
+          name: "st1.{sve_type[0] as {type[1]}}"
+          arguments:
+            - "data: {sve_type[0] as {type[1]}}"
+            - "pg: {predicate[0]}"
+            - "ptr: *mut {type[1]}"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $base]
+
+  - name: svst1{size_literal[1]}_vnum[_{type[0]}]
+    doc: Truncate to {size[1]} bits and store
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[1]}"
+      - "vnum: i64"
+      - "data: {sve_type[0]}"
+    types:
+      - [[i16, i32, i64], i8]
+      - [[u16, u32, u64], u8]
+      - [[i32, i64], i16]
+      - [[u32, u64], u16]
+      - [i64, i32]
+      - [u64, u32]
+    assert_instr: ["st1{size_literal[1]}"]
+    test: { store: 1 }
+    compose:
+      - FnCall:
+          - "svst1{size_literal[1]}_{type[0]}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs:
+                          [{ FnCall: ["svcnt{size_literal[0]}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
+
+  - name: svst1_vnum[_{type}]
+    doc: Non-truncating store
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      ["pg: {predicate}", "base: *mut {type}", "vnum: i64", "data: {sve_type}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st1{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - FnCall:
+          - "svst1_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
+
+  - name: svst2[_{type}]
+    doc: Store two vectors into two-element tuples
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x2}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st2{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - LLVMLink:
+          name: "st2.{sve_type}"
+          arguments:
+            - "data0: {sve_type}"
+            - "data1: {sve_type}"
+            - "pg: {predicate}"
+            - "ptr: *mut {type}"
+      - FnCall:
+          - "{llvm_link}"
+          - - FnCall: ["svget2_{type}", ["$data"], [0]]
+            - FnCall: ["svget2_{type}", ["$data"], [1]]
+            - "$pg"
+            - "$base"
+
+  - name: svst2_vnum[_{type}]
+    doc: Store two vectors into two-element tuples
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate}"
+      - "base: *mut {type}"
+      - "vnum: i64"
+      - "data: {sve_type_x2}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st2{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - FnCall:
+          - "svst2_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
+
+  - name: svst3[_{type}]
+    doc: Store three vectors into three-element tuples
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x3}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st3{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - LLVMLink:
+          name: "st3.{sve_type}"
+          arguments:
+            - "data0: {sve_type}"
+            - "data1: {sve_type}"
+            - "data2: {sve_type}"
+            - "pg: {predicate}"
+            - "ptr: *mut {type}"
+      - FnCall:
+          - "{llvm_link}"
+          - - FnCall: ["svget3_{type}", ["$data"], [0]]
+            - FnCall: ["svget3_{type}", ["$data"], [1]]
+            - FnCall: ["svget3_{type}", ["$data"], [2]]
+            - "$pg"
+            - "$base"
+
+  - name: svst3_vnum[_{type}]
+    doc: Store three vectors into three-element tuples
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate}"
+      - "base: *mut {type}"
+      - "vnum: i64"
+      - "data: {sve_type_x3}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st3{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - FnCall:
+          - "svst3_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
+
+  - name: svst4[_{type}]
+    doc: Store four vectors into four-element tuples
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+    arguments: ["pg: {predicate}", "base: *mut {type}", "data: {sve_type_x4}"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st4{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - LLVMLink:
+          name: "st4.{sve_type}"
+          arguments:
+            - "data0: {sve_type}"
+            - "data1: {sve_type}"
+            - "data2: {sve_type}"
+            - "data3: {sve_type}"
+            - "pg: {predicate}"
+            - "ptr: *mut {type}"
+      - FnCall:
+          - "{llvm_link}"
+          - - FnCall: ["svget4_{type}", ["$data"], [0]]
+            - FnCall: ["svget4_{type}", ["$data"], [1]]
+            - FnCall: ["svget4_{type}", ["$data"], [2]]
+            - FnCall: ["svget4_{type}", ["$data"], [3]]
+            - "$pg"
+            - "$base"
+
+  - name: svst4_vnum[_{type}]
+    doc: Store four vectors into four-element tuples
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+        - dereference: predicated
+    arguments:
+      - "pg: {predicate}"
+      - "base: *mut {type}"
+      - "vnum: i64"
+      - "data: {sve_type_x4}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["st4{size_literal}"]
+    test: { store: 0 }
+    compose:
+      - FnCall:
+          - "svst4_{type}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+            - $data
+
+  - name: svtbl[_{type[0]}]
+    doc: Table lookup in single-vector table
+    arguments: ["data: {sve_type[0]}", "indices: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    assert_instr: [tbl]
+    types:
+      - [f32, u32]
+      - [f64, u64]
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    compose:
+      - LLVMLink: { name: "tbl.{sve_type[0]}" }
+
+  - name: svwhilele_{type[1]}[_{type[0]}]
+    doc: While incrementing scalar is less than or equal to
+    arguments: ["op1: {type[0]}", "op2: {type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: [{ default: whilele, unsigned: whilels }]
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default: { LLVMLink: { name: "whilele.{sve_type[1]}.{type[0]}" } }
+            unsigned: { LLVMLink: { name: "whilels.{sve_type[1]}.{type[0]}" } }
+
+  - name: svwhilelt_{type[1]}[_{type[0]}]
+    doc: While incrementing scalar is less than
+    arguments: ["op1: {type[0]}", "op2: {type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: [{ default: whilelt, unsigned: whilelo }]
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default: { LLVMLink: { name: "whilelt.{sve_type[1]}.{type[0]}" } }
+            unsigned: { LLVMLink: { name: "whilelo.{sve_type[1]}.{type[0]}" } }
+
+  - name: svmax[{_n}_{type}]{_mxz}
+    doc: Maximum
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind}max"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}max.{sve_type}" }
+
+  - name: svmaxnm[{_n}_{type}]{_mxz}
+    doc: Maximum number
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    assert_instr: [fmaxnm]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}maxnm.{sve_type}" }
+
+  - name: svpfalse[_b]
+    doc: Set all predicate elements to false
+    arguments: []
+    return_type: "svbool_t"
+    assert_instr: [pfalse]
+    compose:
+      - FnCall:
+          - "svdupq_n_b8"
+          - - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
+            - false
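+      # The all-false predicate is obtained by broadcasting sixteen false
+      # lanes with svdupq_n_b8 instead of linking a dedicated LLVM intrinsic.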
+
+  - name: svptrue_pat_{type}
+    doc: Set predicate elements to true
+    arguments: []
+    static_defs: ["const PATTERN: svpattern"]
+    return_type: "{predicate}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [[ptrue, "PATTERN = {{svpattern::SV_ALL}}"]]
+    compose:
+      - LLVMLink:
+          name: ptrue.{sve_type}
+          arguments: ["pattern: svpattern"]
+      - FnCall: ["{llvm_link}", [$PATTERN]]
+
+  - name: svptrue_{type}
+    doc: Set predicate elements to true
+    arguments: []
+    return_type: "svbool_t"
+    types: [b8, b16, b32, b64]
+    assert_instr: [ptrue]
+    compose:
+      - FnCall: ["svptrue_pat_{type}", [], ["{{svpattern::SV_ALL}}"]]
+
+  - name: svptest_any
+    doc: Test whether any active element is true
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "bool"
+    assert_instr: [ptest]
+    compose:
+      - LLVMLink: { name: "ptest.any.nxv16i1" }
+
+  - name: svptest_first
+    doc: Test whether first active element is true
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "bool"
+    assert_instr: [ptest]
+    compose:
+      - LLVMLink: { name: "ptest.first.nxv16i1" }
+
+  - name: svptest_last
+    doc: Test whether last active element is true
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "bool"
+    assert_instr: [ptest]
+    compose:
+      - LLVMLink: { name: "ptest.last.nxv16i1" }
+
+  - name: svpfirst[_b]
+    doc: Set the first active predicate element to true
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [pfirst]
+    compose:
+      - LLVMLink: { name: "pfirst.nxv16i1" }
+
+  - name: svpnext_{type}
+    doc: Find next active predicate
+    arguments: ["pg: {predicate}", "op: {predicate}"]
+    return_type: "{predicate}"
+    types: [b8, b16, b32, b64]
+    assert_instr: [pnext]
+    compose:
+      - LLVMLink: { name: "pnext.{sve_type}" }
+
+  - name: svbrkn[_b]_z
+    doc: Propagate break to next partition
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brkn]
+    compose:
+      - LLVMLink: { name: "brkn.z.nxv16i1" }
+
+  - name: svbrkb[_b]_z
+    doc: Break before first true condition
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brkb]
+    compose:
+      - LLVMLink: { name: "brkb.z.nxv16i1" }
+
+  - name: svbrkb[_b]_m
+    doc: Break before first true condition
+    arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brkb]
+    compose:
+      - LLVMLink: { name: "brkb.nxv16i1" }
+
+  - name: svbrkpb[_b]_z
+    doc: Break before first true condition, propagating from previous partition
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brkpb]
+    compose:
+      - LLVMLink: { name: "brkpb.z.nxv16i1" }
+
+  - name: svbrka[_b]_z
+    doc: Break after first true condition
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brka]
+    compose:
+      - LLVMLink: { name: "brka.z.nxv16i1" }
+
+  - name: svbrka[_b]_m
+    doc: Break after first true condition
+    arguments: ["inactive: svbool_t", "pg: svbool_t", "op: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brka]
+    compose:
+      - LLVMLink: { name: "brka.nxv16i1" }
+
+  - name: svbrkpa[_b]_z
+    doc: Break after first true condition, propagating from previous partition
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [brkpa]
+    compose:
+      - LLVMLink: { name: "brkpa.z.nxv16i1" }
+
+  - name: svsel[_b]
+    doc: Conditionally select elements
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: "svbool_t"
+    assert_instr: [sel]
+    compose:
+      - FnCall: ["simd_select", [$pg, $op1, $op2]]
+
+  - name: svsel[_{type}]
+    doc: Conditionally select elements
+    arguments: ["pg: svbool_t", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [sel]
+    compose:
+      - FnCall:
+          - "simd_select"
+          - - MatchSize:
+                - "{type}"
+                - { default: { MethodCall: [$pg, into, []] }, byte: $pg }
+            - $op1
+            - $op2
+          - - MatchSize:
+                - "{type}"
+                - byte: svbool_t
+                  halfword: svbool8_t
+                  default: svbool4_t
+                  doubleword: svbool2_t
+            - _
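+      # For elements wider than a byte, the svbool_t governing predicate is
+      # first converted (via .into()) to the predicate type matching the
+      # element width before simd_select is applied.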
+
+  - name: svsub[{_n}_{type}]{_mxz}
+    doc: Subtract
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64]
+    assert_instr: ["{type_kind.f}sub"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}sub.{sve_type}" }
+
+  - name: svsubr[{_n}_{type}]{_mxz}
+    doc: Subtract reversed
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64, f32, f64]
+    assert_instr: ["{type_kind.f}subr"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}subr.{sve_type}" }
+
+  - name: svcntp_{predicate}
+    doc: Count set predicate bits
+    arguments: ["pg: {predicate}", "op: {predicate}"]
+    types: [b8, b16, b32, b64]
+    return_type: u64
+    assert_instr: [cntp]
+    compose:
+      - LLVMLink: { name: "cntp.{predicate}" }
+
+  - name: svcompact[_{type}]
+    doc: Shuffle active elements of vector to the right and fill with zero
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i32, i64, u32, u64]
+    assert_instr: [compact]
+    compose:
+      - LLVMLink: { name: "compact.{sve_type}" }
+
+  - name: svlasta[_{type}]
+    doc: Extract element after last
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [lasta]
+    compose:
+      - LLVMLink: { name: "lasta.{sve_type}" }
+
+  - name: svclasta[_{type}]
+    doc: Conditionally extract element after last
+    arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [clasta]
+    compose:
+      - LLVMLink: { name: "clasta.{sve_type}" }
+
+  - name: svclasta[_n_{type}]
+    doc: Conditionally extract element after last
+    arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [clasta]
+    compose:
+      - LLVMLink: { name: "clasta.n.{sve_type}" }
+
+  - name: svlastb[_{type}]
+    doc: Extract last element
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [lastb]
+    compose:
+      - LLVMLink: { name: "lastb.{sve_type}" }
+
+  - name: svclastb[_{type}]
+    doc: Conditionally extract last element
+    arguments: ["pg: {predicate}", "fallback: {sve_type}", "data: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [clastb]
+    compose:
+      - LLVMLink: { name: "clastb.{sve_type}" }
+
+  - name: svclastb[_n_{type}]
+    doc: Conditionally extract last element
+    arguments: ["pg: {predicate}", "fallback: {type}", "data: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [clastb]
+    compose:
+      - LLVMLink: { name: "clastb.n.{sve_type}" }
+
+  - name: svqdecp[_{type}]
+    doc: Saturating decrement by active element count
+    arguments: ["op: {sve_type}", "pg: {predicate}"]
+    return_type: "{sve_type}"
+    types: [i16, i32, i64, u16, u32, u64]
+    assert_instr: ["{type_kind.su}qdecp"]
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qdecp.{sve_type}" }
+
+  - name: svqdecp[_n_{type[0]}]_{type[1]}
+    doc: Saturating decrement by active element count
+    arguments: ["op: {type[0]}", "pg: {sve_type[1]}"]
+    return_type: "{type[0]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: ["{type_kind[0].su}qdecp"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qdecp.n{size[0]}.{sve_type[1]}" }
+
+  - name: svqincp[_{type}]
+    doc: Saturating increment by active element count
+    arguments: ["op: {sve_type}", "pg: {predicate}"]
+    return_type: "{sve_type}"
+    types: [i16, i32, i64, u16, u32, u64]
+    assert_instr: ["{type_kind.su}qincp"]
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qincp.{sve_type}" }
+
+  - name: svqincp[_n_{type[0]}]_{type[1]}
+    doc: Saturating increment by active element count
+    arguments: ["op: {type[0]}", "pg: {sve_type[1]}"]
+    return_type: "{type[0]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: ["{type_kind[0].su}qincp"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qincp.n{size[0]}.{sve_type[1]}" }
+
+  - name: svtmad[_{type}]
+    doc: Trigonometric multiply-add coefficient
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: [0, 7] }]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [[ftmad, "IMM3 = 0"]]
+    compose:
+      - LLVMLink:
+          name: "ftmad.x.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall:
+          - "{llvm_link}"
+          - [$op1, $op2, $IMM3]
+
+  - name: svtsmul[_{type[0]}]
+    doc: Trigonometric starting value
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+    assert_instr: [ftsmul]
+    compose:
+      - LLVMLink:
+          name: "ftsmul.x.{sve_type[0]}"
+
+  - name: svtssel[_{type[0]}]
+    doc: Trigonometric select coefficient
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+    assert_instr: [ftssel]
+    compose:
+      - LLVMLink:
+          name: "ftssel.x.{sve_type[0]}"
+
+  - name: svprf{size_literal}
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: words
+        byte: bytes
+        halfword: halfwords
+        doubleword: doublewords
+    doc: Prefetch {textual_size}
+    arguments: ["pg: {predicate}", "base: *T"]
+    static_defs: ["const OP: svprfop", T]
+    types: [b8, b16, b32, b64]
+    assert_instr:
+      - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]
+    test: { load: 0 }
+    compose:
+      - LLVMLink:
+          name: "prf.{sve_type}"
+          arguments:
+            ["pg: {predicate}", "base: *crate::ffi::c_void", "op: svprfop"]
+      - FnCall:
+          - "{llvm_link}"
+          - - $pg
+            - CastAs: [$base, "*const crate::ffi::c_void"]
+            - $OP
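+      # The typed base pointer is erased to *const c_void for the LLVM
+      # prefetch intrinsic; the OP const selects the prefetch operation.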
+
+  - name: svprf{size_literal}_vnum
+    safety:
+      unsafe:
+        - pointer_offset_vnum: predicated
+    substitutions:
+      textual_size:
+        match_size: "{type}"
+        default: words
+        byte: bytes
+        halfword: halfwords
+        doubleword: doublewords
+    doc: Prefetch {textual_size}
+    arguments: ["pg: {predicate}", "base: *T", "vnum: i64"]
+    static_defs: ["const OP: svprfop", T]
+    types: [b8, b16, b32, b64]
+    assert_instr:
+      - ["prf{size_literal}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]
+    test: { load: 0 }
+    compose:
+      - FnCall:
+          - "svprf{size_literal}"
+          - - $pg
+            - MethodCall:
+                - $base
+                - offset
+                - - Multiply:
+                      - CastAs: [{ FnCall: ["svcnt{size_literal}", []] }, isize]
+                      - CastAs: [$vnum, isize]
+          - - $OP
+            - _
+
+  - name: svprf{size_literal[1]}_gather_[{type[0]}]{index_or_offset}
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+    substitutions:
+      index_or_offset:
+        { match_size: "{type[1]}", default: "index", byte: "offset" }
+      indices_or_offsets:
+        { match_size: "{type[1]}", default: "indices", byte: "offsets" }
+      textual_size:
+        match_size: "{type[1]}"
+        default: words
+        byte: bytes
+        halfword: halfwords
+        doubleword: doublewords
+    doc: Prefetch {textual_size}
+    types:
+      - [[i32, u32, i64, u64], [i8, i16, i32, i64]]
+    arguments:
+      ["pg: {predicate[0]}", "base: *T", "{indices_or_offsets}: {sve_type[0]}"]
+    static_defs: ["const OP: svprfop", T]
+    assert_instr:
+      [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}, T = i64"]]
+    test: { load: 0 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "prf{size_literal[1]}.gather.{type_kind[0].su}xtw.index.{sve_type[0]}"
+                arguments:
+                  - "pg: {predicate[0]}"
+                  - "base: *crate::ffi::c_void"
+                  - "{indices_or_offsets}: {sve_type[0]}"
+                  - "op: svprfop"
+            doubleword:
+              LLVMLink:
+                name: "prf{size_literal[1]}.gather.index.{sve_type[0]}"
+                arguments:
+                  - "pg: {predicate[0]}"
+                  - "base: *crate::ffi::c_void"
+                  - "{indices_or_offsets}: {sve_type[0]}"
+                  - "op: svprfop"
+      - FnCall:
+          - "{llvm_link}"
+          - - $pg
+            - CastAs: [$base, "*const crate::ffi::c_void"]
+            - "${indices_or_offsets}"
+            - $OP
+
+  - name: svprf{size_literal[1]}_gather[_{type[0]}base]
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - no_provenance: bases
+    substitutions:
+      textual_size:
+        match_size: "{type[1]}"
+        default: words
+        byte: bytes
+        halfword: halfwords
+        doubleword: doublewords
+    doc: Prefetch {textual_size}
+    types:
+      - [[u32, u64], [i8, i16, i32, i64]]
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    static_defs: ["const OP: svprfop"]
+    assert_instr: [["prf{size_literal[1]}", "OP = {{svprfop::SV_PLDL1KEEP}}"]]
+    test: { load: 0 }
+    compose:
+      - LLVMLink:
+          name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}"
+          arguments:
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "index: i64"
+            - "op: svprfop"
+      - FnCall: ["{llvm_link}", [$pg, $bases, 0, $OP]]
+
+  - name: svprf{size_literal[1]}_gather[_{type[0]}base]_{index_or_offset}
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - no_provenance: bases
+    substitutions:
+      index_or_offset:
+        { match_size: "{type[1]}", default: "index", byte: "offset" }
+      textual_size:
+        match_size: "{type[1]}"
+        default: words
+        byte: bytes
+        halfword: halfwords
+        doubleword: doublewords
+    doc: Prefetch {textual_size}
+    types:
+      - [[u32, u64], [i8, i16, i32, i64]]
+    arguments:
+      ["pg: {predicate[0]}", "bases: {sve_type[0]}", "{index_or_offset}: i64"]
+    static_defs: ["const OP: svprfop"]
+    assert_instr: [["prfb", "OP = {{svprfop::SV_PLDL1KEEP}}"]]
+    test: { load: 0 }
+    compose:
+      - LLVMLink:
+          name: "prf{size_literal[1]}.gather.scalar.offset.{sve_type[0]}"
+          arguments:
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "{index_or_offset}: i64"
+            - "op: svprfop"
+      - FnCall:
+          - "{llvm_link}"
+          - - $pg
+            - $bases
+            - MatchSize:
+                - "{type[1]}"
+                - byte: $offset
+                  halfword: { MethodCall: [$index, unchecked_shl, [1]] }
+                  default: { MethodCall: [$index, unchecked_shl, [2]] }
+                  doubleword: { MethodCall: [$index, unchecked_shl, [3]] }
+            - $OP
+
+  - name: svcvt_{type[0]}[_{type[1]}]{_mxz}
+    doc: Floating-point convert
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[f32, f64], [i32, u32, i64, u64]]
+    zeroing_method: { drop: inactive }
+    substitutions:
+      convert_from: { match_kind: "{type[1]}", default: s, unsigned: u }
+    assert_instr: ["{convert_from}cvtf"]
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              MatchSize:
+                - "{type[1]}"
+                - default:
+                    LLVMLink:
+                      name: "{convert_from}cvtf.{sve_type[0]}.{sve_type[1]}"
+                  doubleword:
+                    LLVMLink:
+                      name: "{convert_from}cvtf.{type[0]}{type[1]}"
+            doubleword:
+              LLVMLink:
+                name: "{convert_from}cvtf.{sve_type[0]}.{sve_type[1]}"
+
+  - name: svcvt_{type[0]}[_{type[1]}]{_mxz}
+    doc: Floating-point convert
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i32, u32, i64, u64], [f32, f64]]
+    zeroing_method: { drop: inactive }
+    substitutions:
+      convert_to: { match_kind: "{type[0]}", default: s, unsigned: u }
+    assert_instr: ["fcvtz{convert_to}"]
+    compose:
+      - LLVMLink: { name: "fcvtz{convert_to}.{type[0]}{type[1]}" }
+
+  - name: svcvt_{type[0]}[_{type[1]}]{_mxz}
+    doc: Floating-point convert
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, f64], [f64, f32]]
+    zeroing_method: { drop: inactive }
+    assert_instr: [fcvt]
+    compose:
+      - LLVMLink: { name: "fcvt.{type[0]}{type[1]}" }
+
+  - name: svreinterpret_{type[0]}[_{type[1]}]
+    doc: Reinterpret vector contents
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    assert_instr: []
+    types:
+      - - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+        - [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    compose:
+      - FnCall: ["simd_reinterpret", [$op]]
+
+  - name: svrinta[_{type}]{_mxz}
+    doc: Round to nearest, ties away from zero
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frinta]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frinta.{sve_type}" }
+
+  - name: svrinti[_{type}]{_mxz}
+    doc: Round using current rounding mode
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frinti]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frinti.{sve_type}" }
+
+  - name: svrintm[_{type}]{_mxz}
+    doc: Round towards -∞
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frintm]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frintm.{sve_type}" }
+
+  - name: svrintn[_{type}]{_mxz}
+    doc: Round to nearest, ties to even
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frintn]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frintn.{sve_type}" }
+
+  - name: svrintp[_{type}]{_mxz}
+    doc: Round towards +∞
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frintp]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frintp.{sve_type}" }
+
+  - name: svrintx[_{type}]{_mxz}
+    doc: Round using current rounding mode (exact)
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frintx]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frintx.{sve_type}" }
+
+  - name: svrintz[_{type}]{_mxz}
+    doc: Round towards zero
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frintz]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frintz.{sve_type}" }
+
+  - name: svabd[{_n}_{type}]{_mxz}
+    doc: Absolute difference
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f64, f32, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind}abd"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind}abd.{sve_type}" }
+
+  - name: svabs[_{type}]{_mxz}
+    doc: Absolute value
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64]
+    assert_instr: ["{type_kind.f}abs"]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "{type_kind.f}abs.{sve_type}" }
+
+  - name: svand[{_n}_{type}]{_mxz}
+    doc: Bitwise AND
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [and]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink: { name: "and.{sve_type}" }
+
+  - name: svandv[_{type}]
+    doc: Bitwise AND reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    assert_instr: [andv]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    compose:
+      - LLVMLink: { name: "andv.{sve_type}" }
+
+  - name: svand[_b]_z
+    doc: Bitwise AND
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [and]
+    compose:
+      - LLVMLink: { name: "and.z.nvx16i1" }
+
+  - name: svmov[_b]_z
+    doc: Move
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [mov]
+    compose:
+      - FnCall: ["svand_b_z", [$pg, $op, $op]]
+
+  - name: svbic[{_n}_{type}]{_mxz}
+    doc: Bitwise clear
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [bic]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink: { name: "bic.{sve_type}" }
+
+  - name: svbic[_b]_z
+    doc: Bitwise clear
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [bic]
+    compose:
+      - LLVMLink: { name: "bic.z.nvx16i1" }
+
+  - name: sveor[{_n}_{type}]{_mxz}
+    doc: Bitwise exclusive OR
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [eor]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink: { name: "eor.{sve_type}" }
+
+  - name: sveorv[_{type}]
+    doc: Bitwise exclusive OR reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    assert_instr: [eorv]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    compose:
+      - LLVMLink: { name: "eorv.{sve_type}" }
+
+  - name: sveor[_b]_z
+    doc: Bitwise exclusive OR
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [eor]
+    compose:
+      - LLVMLink: { name: "eor.z.nvx16i1" }
+
+  - name: svnot[_{type}]{_mxz}
+    doc: Bitwise invert
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [not]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "not.{sve_type}" }
+
+  - name: svnot[_b]_z
+    doc: Bitwise invert
+    arguments: ["pg: svbool_t", "op: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [not]
+    compose:
+      - FnCall: ["sveor_b_z", [$pg, $op, $pg]]
+
+  - name: svcnot[_{type}]{_mxz}
+    doc: Logically invert boolean condition
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [cnot]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "cnot.{sve_type}" }
+
+  - name: svnand[_b]_z
+    doc: Bitwise NAND
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [nand]
+    compose:
+      - LLVMLink: { name: "nand.z.nxv16i1" }
+
+  - name: svnor[_b]_z
+    doc: Bitwise NOR
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [nor]
+    compose:
+      - LLVMLink: { name: "nor.z.nxv16i1" }
+
+  - name: svorr[{_n}_{type}]{_mxz}
+    doc: Bitwise inclusive OR
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [orr]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink: { name: "orr.{sve_type}" }
+
+  - name: svorv[_{type}]
+    doc: Bitwise inclusive OR reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    assert_instr: [orv]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    compose:
+      - LLVMLink: { name: "orv.{sve_type}" }
+
+  - name: svorr[_b]_z
+    doc: Bitwise inclusive OR
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [orr]
+    compose:
+      - LLVMLink: { name: "orr.z.nvx16i1" }
+
+  - name: svorn[_b]_z
+    doc: Bitwise inclusive OR, inverting second argument
+    arguments: ["pg: svbool_t", "op1: svbool_t", "op2: svbool_t"]
+    return_type: svbool_t
+    assert_instr: [orn]
+    compose:
+      - LLVMLink: { name: "orn.z.nvx16i1" }
+
+  - name: svlsl[{_n}_{type[0]}]{_mxz}
+    doc: Logical shift left
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, u8], u8]
+      - [[i16, u16], u16]
+      - [[i32, u32], u32]
+      - [[i64, u64], u64]
+    assert_instr: [lsl]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "lsl.{sve_type[0]}" }
+
+  - name: svlsl_wide[{_n}_{type[0]}]{_mxz}
+    doc: Logical shift left
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, i16, i32, u8, u16, u32], u64]
+    assert_instr: [lsl]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "lsl.wide.{sve_type[0]}" }
+
+  - name: svasr[{_n}_{type[0]}]{_mxz}
+    doc: Arithmetic shift right
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+    assert_instr: [asr]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "asr.{sve_type[0]}" }
+
+  - name: svasr_wide[{_n}_{type[0]}]{_mxz}
+    doc: Arithmetic shift right
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, i16, i32], u64]
+    assert_instr: [asr]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "asr.wide.{sve_type[0]}" }
+
+  - name: svasrd[_n_{type}]{_mxz}
+    doc: Arithmetic shift right for divide by immediate
+    arguments: ["pg: {predicate}", "op1: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size}"] }]
+    types: [i8, i16, i32, i64]
+    assert_instr: [[asrd, "IMM2 = 1"]]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink:
+          name: "asrd.{sve_type}"
+          arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]]
+
+  - name: svlsr[{_n}_{type}]{_mxz}
+    doc: Logical shift right
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u16, u32, u64]
+    assert_instr: [lsr]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "lsr.{sve_type}" }
+
+  - name: svlsr_wide[{_n}_{type[0]}]{_mxz}
+    doc: Logical shift right
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[u8, u16, u32], u64]
+    assert_instr: [lsr]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "lsr.wide.{sve_type[0]}" }
+
+  - name: svadda[_{type}]
+    doc: Add reduction (strictly-ordered)
+    arguments: ["pg: {predicate}", "initial: {type}", "op: {sve_type}"]
+    return_type: "{type}"
+    assert_instr: [fadda]
+    types: [f32, f64]
+    compose:
+      - LLVMLink: { name: "fadda.{sve_type}" }
+
+  - name: svaddv[_{type}]
+    doc: Add reduction
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i64, u64]
+    assert_instr: [{ float: faddv, default: uaddv }]
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}addv.{sve_type}" }
+
+  - name: svaddv[_{type[0]}]
+    doc: Add reduction
+    arguments: ["pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{type[1]}"
+    types:
+      - [[i8, i16, i32], i64]
+      - [[u8, u16, u32], u64]
+    assert_instr: ["{type_kind[0].su}addv"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addv.{sve_type[0]}" }
+
+  - name: svmaxv[_{type}]
+    doc: Maximum reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.fsu}maxv"]
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}maxv.{sve_type}" }
+
+  - name: svmaxnmv[_{type}]
+    doc: Maximum number reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64]
+    assert_instr: [fmaxnmv]
+    compose:
+      - LLVMLink: { name: "fmaxnmv.{sve_type}" }
+
+  - name: svminv[_{type}]
+    doc: Minimum reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.fsu}minv"]
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}minv.{sve_type}" }
+
+  - name: svminnmv[_{type}]
+    doc: Minimum number reduction to scalar
+    arguments: ["pg: {predicate}", "op: {sve_type}"]
+    return_type: "{type}"
+    types: [f32, f64]
+    assert_instr: [fminnmv]
+    compose:
+      - LLVMLink: { name: "fminnmv.{sve_type}" }
+
+  - name: svmul[{_n}_{type}]{_mxz}
+    doc: Multiply
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: ["{type_kind.f}mul"]
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.f}mul.{sve_type}" }
+
+  - name: svmulh[{_n}_{type}]{_mxz}
+    doc: Multiply, returning high-half
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: ["{type_kind.su}mulh"]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}mulh.{sve_type}" }
+
+  - name: svmulx[{_n}_{type}]{_mxz}
+    doc: Multiply extended (∞×0=2)
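+    # FMULX behaves like FMUL except that an infinity multiplied by zero returns ±2.0 rather than NaN.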
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: ["fmulx"]
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "fmulx.{sve_type}" }
+
+  - name: svrecpe[_{type}]
+    doc: Reciprocal estimate
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frecpe]
+    compose:
+      - LLVMLink: { name: "frecpe.x.{sve_type}" }
+
+  - name: svrecps[_{type}]
+    doc: Reciprocal step
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frecps]
+    compose:
+      - LLVMLink: { name: "frecps.x.{sve_type}" }
+
+  - name: svrecpx[_{type}]{_mxz}
+    doc: Reciprocal exponent
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frecpx]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "frecpx.x.{sve_type}" }
+
+  - name: svrsqrte[_{type}]
+    doc: Reciprocal square root estimate
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frsqrte]
+    compose:
+      - LLVMLink: { name: "frsqrte.x.{sve_type}" }
+
+  - name: svrsqrts[_{type}]
+    doc: Reciprocal square root step
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: [frsqrts]
+    compose:
+      - LLVMLink: { name: "frsqrts.x.{sve_type}" }
+
+  - name: svmad[{_n}_{type}]{_mxz}
+    doc: Multiply-add, multiplicand first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: ["{type_kind.f}mad"]
+    compose:
+      - LLVMLink: { name: "{type_kind.f}mad.{sve_type}" }
+
+  - name: svmla[{_n}_{type}]{_mxz}
+    doc: Multiply-add, addend first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: ["{type_kind.f}mla"]
+    compose:
+      - LLVMLink: { name: "{type_kind.f}mla.{sve_type}" }
+
+  - name: svmla_lane[_{type}]
+    doc: Multiply-add, addend first
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [f32, f64]
+    assert_instr: [[fmla, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "fmla.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmls[{_n}_{type}]{_mxz}
+    doc: Multiply-subtract, minuend first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: ["{type_kind.f}mls"]
+    compose:
+      - LLVMLink: { name: "{type_kind.f}mls.{sve_type}" }
+
+  - name: svmls_lane[_{type}]
+    doc: Multiply-subtract, minuend first
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [f32, f64]
+    assert_instr: [[fmls, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "fmls.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmsb[{_n}_{type}]{_mxz}
+    doc: Multiply-subtract, multiplicand first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: ["{type_kind.f}msb"]
+    compose:
+      - LLVMLink: { name: "{type_kind.f}msb.{sve_type}" }
+
+  - name: svnmad[{_n}_{type}]{_mxz}
+    doc: Negated multiply-add, multiplicand first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: [fnmad]
+    compose:
+      - LLVMLink: { name: "fnmad.{sve_type}" }
+
+  - name: svnmla[{_n}_{type}]{_mxz}
+    doc: Negated multiply-add, addend first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: [fnmla]
+    compose:
+      - LLVMLink: { name: "fnmla.{sve_type}" }
+
+  - name: svnmls[{_n}_{type}]{_mxz}
+    doc: Negated multiply-subtract, minuend first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: [fnmls]
+    compose:
+      - LLVMLink: { name: "fnmls.{sve_type}" }
+
+  - name: svnmsb[{_n}_{type}]{_mxz}
+    doc: Negated multiply-subtract, multiplicand first
+    arguments:
+      - "pg: {predicate}"
+      - "op1: {sve_type}"
+      - "op2: {sve_type}"
+      - "op3: {sve_type}"
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    n_variant_op: op3
+    assert_instr: [fnmsb]
+    compose:
+      - LLVMLink: { name: "fnmsb.{sve_type}" }
+
+  - name: svneg[_{type}]{_mxz}
+    doc: Negate
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64]
+    assert_instr: ["{type_kind.f}neg"]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "{type_kind.f}neg.{sve_type}" }
+
+  - name: svqadd[{_n}_{type}]
+    doc: Saturating add
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.su}qadd"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qadd.x.{sve_type}" }
+
+  - name: svadr{size_literal[2]}[_{type[0]}base]_[{type[1]}]{index_or_offset}
+    substitutions:
+      index_or_offset: { match_size: "{type[2]}", default: index, byte: offset }
+      indices_or_offsets:
+        { match_size: "{type[2]}", default: indices, byte: offsets }
+    doc: Compute vector addresses for {size[2]}-bit data
+    arguments: ["bases: {sve_type[0]}", "{indices_or_offsets}: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [u32, [i32, u32], [i8, i16, i32, i64]]
+      - [u64, [i64, u64], [i8, i16, i32, i64]]
+    assert_instr: [adr]
+    compose:
+      - LLVMLink: { name: "adr{size_literal[2]}.{sve_type[0]}" }
+
+  - name: svdot[{_n}_{type[0]}]
+    doc: Dot product
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i32, i8]
+      - [i64, i16]
+      - [u32, u8]
+      - [u64, u16]
+    assert_instr: ["{type_kind[0].su}dot"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}dot.{sve_type[0]}" }
+
+  - name: svdot_lane[_{type[0]}]
+    doc: Dot product
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }]
+    types:
+      - [i32, i8]
+      - [i64, i16]
+      - [u32, u8]
+      - [u64, u16]
+    assert_instr: [["{type_kind[0].su}dot", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}dot.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "imm_index: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svusdot[{_n}_{type[0]}]
+    doc: Dot product (unsigned × signed)
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, u8, i8]]
+    assert_instr: [usdot]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "usdot.{sve_type[0]}" }
+
+  - name: svusdot_lane[_{type[0]}]
+    doc: Dot product (unsigned × signed)
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }]
+    types: [[i32, u8, i8]]
+    assert_instr: [[usdot, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "usdot.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[2]}"
+            - "imm_index: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svsudot[{_n}_{type[0]}]
+    doc: Dot product (signed × unsigned)
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, i8, u8]]
+    assert_instr: [usdot]
+    n_variant_op: op3
+    compose:
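+      # sudot(op1, op2, op3) is usdot(op1, op3, op2) with the multiplicands swapped, which is why the
+      # usdot instruction is asserted above.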
+      - FnCall: ["svusdot_{type[0]}", [$op1, $op3, $op2]]
+
+  - name: svsudot_lane[_{type[0]}]
+    doc: Dot product (signed × unsigned)
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }]
+    types: [[i32, i8, u8]]
+    assert_instr: [[sudot, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sudot.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[2]}"
+            - "imm_index: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svdiv[{_n}_{type}]{_mxz}
+    doc: Divide
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i32, i64, u32, u64]
+    assert_instr: ["{type_kind.fsu}div"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}div.{sve_type}" }
+
+  - name: svdivr[{_n}_{type}]{_mxz}
+    doc: Divide reversed
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i32, i64, u32, u64]
+    assert_instr: ["{type_kind.fsu}divr"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}divr.{sve_type}" }
+
+  - name: svexpa[_{type[0]}]
+    doc: Floating-point exponential accelerator
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, u32], [f64, u64]]
+    assert_instr: [fexpa]
+    compose:
+      - LLVMLink: { name: "fexpa.x.{sve_type[0]} " }
+
+  - name: svscale[{_n}_{type[0]}]{_mxz}
+    doc: Adjust exponent
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, i32], [f64, i64]]
+    assert_instr: [fscale]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "fscale.{sve_type[0]}" }
+
+  - name: svmmla[_{type}]
+    doc: Matrix multiply-accumulate
+    target_features: [f32mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32]
+    assert_instr: [fmmla]
+    compose:
+      - LLVMLink: { name: "fmmla.{sve_type}" }
+
+  - name: svmmla[_{type}]
+    doc: Matrix multiply-accumulate
+    target_features: [f64mm]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f64]
+    assert_instr: [fmmla]
+    compose:
+      - LLVMLink: { name: "fmmla.{sve_type}" }
+
+  - name: svmmla[_{type[0]}]
+    doc: Matrix multiply-accumulate
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, i8], [u32, u8]]
+    assert_instr: ["{type_kind[0].su}mmla"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mmla.{sve_type[0]}" }
+
+  - name: svusmmla[_{type[0]}]
+    doc: Matrix multiply-accumulate (unsigned × signed)
+    target_features: [i8mm]
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[2]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, u8, i8]]
+    assert_instr: [usmmla]
+    compose:
+      - LLVMLink: { name: "usmmla.{sve_type[0]}" }
+
+  - name: svmin[{_n}_{type}]{_mxz}
+    doc: Minimum
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.fsu}min"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}min.{sve_type}" }
+
+  - name: svminnm[{_n}_{type}]{_mxz}
+    doc: Minimum number
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    zeroing_method: { select: op1 }
+    assert_instr: [fminnm]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "fminnm.{sve_type}" }
diff --git a/crates/stdarch-gen2/spec/sve/sve2.spec.yml b/crates/stdarch-gen2/spec/sve/sve2.spec.yml
new file mode 100644
index 0000000000..8f15c56130
--- /dev/null
+++ b/crates/stdarch-gen2/spec/sve/sve2.spec.yml
@@ -0,0 +1,2992 @@
+arch_cfgs:
+  - arch_name: aarch64
+    target_feature: [sve, sve2]
+    llvm_prefix: llvm.aarch64.sve
+intrinsics:
+  - name: svbext[{_n}_{type}]
+    target_features: [sve2-bitperm]
+    doc: Gather lower bits from positions selected by bitmask
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u16, u32, u64]
+    assert_instr: [bext]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "bext.x.{sve_type}" }
+
+  - name: svbgrp[{_n}_{type}]
+    target_features: [sve2-bitperm]
+    doc: Group bits to right or left as selected by bitmask
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u16, u32, u64]
+    assert_instr: [bgrp]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "bgrp.x.{sve_type}" }
+
+  - name: svbdep[{_n}_{type}]
+    target_features: [sve2-bitperm]
+    doc: Scatter lower bits into positions selected by bitmask
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u16, u32, u64]
+    assert_instr: [bdep]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "bdep.x.{sve_type}" }
+
+  - name: svhistcnt[_{type[0]}]_z
+    doc: Count matching elements
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[i32, u32], [i64, u64], [u32, u32], [u64, u64]]
+    assert_instr: [histcnt]
+    compose:
+      - LLVMLink: { name: "histcnt.{sve_type[0]}" }
+
+  - name: svhistseg[_{type[0]}]
+    doc: Count matching elements in 128-bit segments
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[i8, u8], [u8, u8]]
+    assert_instr: [histseg]
+    compose:
+      - LLVMLink: { name: "histseg.{sve_type[0]}" }
+
+  - name: svmatch[_{type}]
+    doc: Detect any matching elements
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [i8, i16, u8, u16]
+    assert_instr: [match]
+    compose:
+      - LLVMLink: { name: "match.{sve_type}" }
+
+  - name: svnmatch[_{type}]
+    doc: Detect no matching elements
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{predicate}"
+    types: [i8, i16, u8, u16]
+    assert_instr: [nmatch]
+    compose:
+      - LLVMLink: { name: "nmatch.{sve_type}" }
+
+  - name: svhadd[{_n}_{type}]{_mxz}
+    doc: Halving add
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.su}hadd"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}hadd.{sve_type}" }
+
+  - name: svrhadd[{_n}_{type}]{_mxz}
+    doc: Rounding halving add
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.su}rhadd"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}rhadd.{sve_type}" }
+
+  - name: svaddhnb[{_n}_{type[0]}]
+    doc: Add narrow high part (bottom)
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    return_type: "{sve_type[1]}"
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    assert_instr: [addhnb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "addhnb.{sve_type[0]}" }
+
+  - name: svaddhnt[{_n}_{type[0]}]
+    doc: Add narrow high part (top)
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    return_type: "{sve_type[1]}"
+    arguments:
+      ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    assert_instr: [addhnt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "addhnt.{sve_type[0]}" }
+
+  - name: svraddhnb[{_n}_{type[0]}]
+    doc: Rounding add narrow high part (bottom)
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    return_type: "{sve_type[1]}"
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    assert_instr: [raddhnb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "raddhnb.{sve_type[0]}" }
+
+  - name: svraddhnt[{_n}_{type[0]}]
+    doc: Rounding add narrow high part (top)
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    return_type: "{sve_type[1]}"
+    arguments:
+      ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    assert_instr: [raddhnt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "raddhnt.{sve_type[0]}" }
+
+  - name: svcadd[_{type}]
+    doc: Complex add with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [90, 270] }]
+    assert_instr: [[cadd, "IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: cadd.x.{sve_type}
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_ROTATION]]
+
+  - name: svcdot[_{type[0]}]
+    doc: Complex dot product
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, i8], [i64, i16]]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }]
+    assert_instr: [[cdot, "IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: cdot.{sve_type[0]}
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]]
+
+  - name: svcdot_lane[_{type[0]}]
+    doc: Complex dot product
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i32, i8], [i64, i16]]
+    static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"]
+    constraints:
+      - { variable: IMM_INDEX, vec_max_elems_type: "{type[0]}" }
+      - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }
+    assert_instr: [[cdot, "IMM_INDEX = 0, IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: cdot.lane.{sve_type[0]}
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "imm_index: i32"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]]
+
+  - name: svcmla[_{type}]
+    doc: Complex multiply-add with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }]
+    assert_instr: [[cmla, "IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: cmla.x.{sve_type}
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]]
+
+  - name: svcmla_lane[_{type}]
+    doc: Complex multiply-add with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i16, i32, u16, u32]
+    static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"]
+    constraints:
+      - variable: IMM_INDEX
+        range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] }
+      - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }
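+    # IMM_INDEX selects a complex-number pair within each 128-bit segment: 0..=3 for 16-bit elements,
+    # 0..=1 for 32-bit elements.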
+    assert_instr: [[cmla, "IMM_INDEX = 0, IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: cmla.lane.x.{sve_type}
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_index: i32"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]]
+
+  - name: svqrdcmlah[_{type}]
+    doc: Saturating rounding doubling complex multiply-add high with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }]
+    assert_instr: [[sqrdcmlah, "IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: sqrdcmlah.x.{sve_type}
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_ROTATION]]
+
+  - name: svqrdcmlah_lane[_{type}]
+    doc: Saturating rounding doubling complex multiply-add high with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i16, i32]
+    static_defs: ["const IMM_INDEX: i32", "const IMM_ROTATION: i32"]
+    constraints:
+      - variable: IMM_INDEX
+        range: { match_size: "{type}", default: [0, 1], halfword: [0, 3] }
+      - { variable: IMM_ROTATION, any_values: [0, 90, 180, 270] }
+    assert_instr: [[sqrdcmlah, "IMM_INDEX = 0, IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: sqrdcmlah.lane.x.{sve_type}
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "imm_index: i32"
+            - "imm_rotation: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX, $IMM_ROTATION]]
+
+  - name: svqcadd[_{type}]
+    doc: Saturating complex add with rotate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    static_defs: ["const IMM_ROTATION: i32"]
+    constraints: [{ variable: "IMM_ROTATION", any_values: [90, 270] }]
+    assert_instr: [[sqcadd, "IMM_ROTATION = 90"]]
+    compose:
+      - LLVMLink:
+          name: "sqcadd.x.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_rotation: i32"]
+      - FnCall: ["{llvm_link}", ["$op1", "$op2", "$IMM_ROTATION"]]
+
+  - name: svsublb[{_n}_{type[0]}]
+    doc: Subtract long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}sublb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}sublb.{sve_type[0]}"
+
+  - name: svsublbt[{_n}_{type[0]}]
+    doc: Subtract long (bottom - top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+    assert_instr: [ssublbt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink:
+          name: "ssublbt.{sve_type[0]}"
+
+  - name: svsublt[{_n}_{type[0]}]
+    doc: Subtract long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}sublt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}sublt.{sve_type[0]}" }
+
+  - name: svsubltb[{_n}_{type[0]}]
+    doc: Subtract long (top - bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+    assert_instr: [ssubltb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink:
+          name: "ssubltb.{sve_type[0]}"
+
+  - name: svsubwb[{_n}_{type[0]}]
+    doc: Subtract wide (bottom)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}subwb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}subwb.{sve_type[0]}" }
+
+  - name: svsubwt[{_n}_{type[0]}]
+    doc: Subtract wide (top)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}subwt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}subwt.{sve_type[0]}" }
+
+  - name: svrsubhnb[{_n}_{type[0]}]
+    doc: Rounding subtract narrow high part (bottom)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [rsubhnb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "rsubhnb.{sve_type[0]}" }
+
+  - name: svrsubhnt[{_n}_{type[0]}]
+    doc: Rounding subtract narrow high part (top)
+    arguments:
+      ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [rsubhnt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "rsubhnt.{sve_type[0]}" }
+
+  - name: svsubhnb[{_n}_{type[0]}]
+    doc: Subtract narrow high part (bottom)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [subhnb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "subhnb.{sve_type[0]}" }
+
+  - name: svsubhnt[{_n}_{type[0]}]
+    doc: Subtract narrow high part (top)
+    arguments:
+      ["even: {sve_type[1]}", "op1: {sve_type[0]}", "op2: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [subhnt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "subhnt.{sve_type[0]}" }
+
+  - name: svsbclb[{_n}_{type}]
+    doc: Subtract with borrow long (bottom)
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32, u64]
+    assert_instr: [sbclb]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sbclb.{sve_type}" }
+
+  - name: svsbclt[{_n}_{type}]
+    doc: Subtract with borrow long (top)
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32, u64]
+    assert_instr: [sbclt]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sbclt.{sve_type}" }
+
+  - name: svqsub[{_n}_{type}]{_mxz}
+    doc: Saturating subtract
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.su}qsub"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qsub.{sve_type}" }
+
+  - name: svqsubr[{_n}_{type}]{_mxz}
+    doc: Saturating subtract reversed
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    zeroing_method: { select: op1 }
+    assert_instr: ["{type_kind.su}qsubr"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qsubr.{sve_type}" }
+
+  - name: svhsub[{_n}_{type}]{_mxz}
+    doc: Halving subtract
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.su}hsub"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}hsub.{sve_type}" }
+
+  - name: svhsubr[{_n}_{type}]{_mxz}
+    doc: Halving subtract reversed
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.su}hsub"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}hsubr.{sve_type}" }
+
+  - name: svwhilege_{sve_type[1]}[_{type[0]}]
+    doc: While decrementing scalar is greater than or equal to
+    arguments: ["op1: {type[0]}", "op2: {type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: [{ default: whilege, unsigned: whilehs }]
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default: { LLVMLink: { name: "whilege.{sve_type[1]}.{type[0]}" } }
+            unsigned: { LLVMLink: { name: "whilehs.{sve_type[1]}.{type[0]}" } }
+
+  - name: svwhilegt_{sve_type[1]}[_{type[0]}]
+    doc: While decrementing scalar is greater than
+    arguments: ["op1: {type[0]}", "op2: {type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[[i32, i64, u32, u64], [b8, b16, b32, b64]]]
+    assert_instr: [{ default: whilegt, unsigned: whilehi }]
+    compose:
+      - MatchKind:
+          - "{type[0]}"
+          - default: { LLVMLink: { name: "whilegt.{sve_type[1]}.{type[0]}" } }
+            unsigned: { LLVMLink: { name: "whilehi.{sve_type[1]}.{type[0]}" } }
+
+  - name: svwhilerw_{size}ptr
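+    # Private helper shared by every svwhilerw element type: only the element size matters to the LLVM
+    # intrinsic, so the pointers are erased to *const c_void and the intrinsic is selected via {size_alt}.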
+    safety:
+      unsafe: []
+    visibility: private
+    static_defs: [T]
+    substitutions:
+      size_alt:
+        match_size: "{type}"
+        byte: b
+        halfword: h
+        default: s
+        doubleword: d
+    arguments: ["op1: *T", "op2: *T"]
+    return_type: "{predicate}"
+    types: [i8, i16, i32, i64]
+    assert_instr: []
+    compose:
+      - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]]
+      - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]]
+      - LLVMLink:
+          name: "whilerw.{size_alt}.{predicate}.p0"
+          arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"]
+
+  - name: svwhilerw[_{type}]
+    doc: While free of read-after-write conflicts
+    # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't
+    # seem to be documented, so we conservatively keep this unsafe for now.
+    safety:
+      unsafe:
+      - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints
+        must be met for at least the base pointers, `op1` and `op2`."
+    arguments: ["op1: *{type}", "op2: *{type}"]
+    return_type: "svbool_t"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [whilerw]
+    compose:
+      - FnCall:
+          - "svwhilerw_{size}ptr"
+          - - $op1
+            - $op2
+          - - Type: "{type}"
+
+  - name: svwhilewr_{size}ptr
+    safety:
+      unsafe: []
+    visibility: private
+    static_defs: [T]
+    substitutions:
+      size_alt:
+        match_size: "{type}"
+        byte: b
+        halfword: h
+        default: s
+        doubleword: d
+    arguments: ["op1: *T", "op2: *T"]
+    return_type: "{predicate}"
+    types: [i8, i16, i32, i64]
+    assert_instr: []
+    compose:
+      - Let: [op1, CastAs: [$op1, "*const crate::ffi::c_void"]]
+      - Let: [op2, CastAs: [$op2, "*const crate::ffi::c_void"]]
+      - LLVMLink:
+          name: "whilewr.{size_alt}.{predicate}.p0"
+          arguments: ["op1: *crate::ffi::c_void", "op2: *crate::ffi::c_void"]
+
+  - name: svwhilewr[_{type}]
+    doc: While free of write-after-read conflicts
+    # TODO: This might be safe even with unrelated pointers, but the LLVM builtin's guarantees don't
+    # seem to be documented, so we conservatively keep this unsafe for now.
+    safety:
+      unsafe:
+      - custom: "[`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints
+        must be met for at least the base pointers, `op1` and `op2`."
+    arguments: ["op1: *{type}", "op2: *{type}"]
+    return_type: "svbool_t"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [whilewr]
+    compose:
+      - FnCall:
+          - "svwhilewr_{size}ptr"
+          - - $op1
+            - $op2
+          - - Type: "{type}"
+
+  - name: svtbl2[_{type[0]}]
+    doc: Table lookup in two-vector table
+    arguments: ["data: {sve_type_x2[0]}", "indices: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    assert_instr: [tbl]
+    compose:
+      - LLVMLink:
+          name: "tbl2.{sve_type[0]}"
+          arguments:
+            - "data0: {sve_type[0]}"
+            - "data1: {sve_type[0]}"
+            - "indices: {sve_type[1]}"
+      - FnCall:
+          - "{llvm_link}"
+          - - FnCall: ["svget2_{type[0]}", ["$data"], [0]]
+            - FnCall: ["svget2_{type[0]}", ["$data"], [1]]
+            - $indices
+
+  - name: svtbx[_{type[0]}]
+    doc: Table lookup in single-vector table (merging)
+    arguments:
+      - "fallback: {sve_type[0]}"
+      - "data: {sve_type[0]}"
+      - "indices: {sve_type[1]}"
+    return_type: "{sve_type[0]}"
+    types:
+      - [f32, u32]
+      - [f64, u64]
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+      - [u8, u8]
+      - [u16, u16]
+      - [u32, u32]
+      - [u64, u64]
+    assert_instr: [tbx]
+    compose:
+      - LLVMLink: { name: "tbx.{sve_type[0]}" }
+
+  - name: svcvtlt_{type[0]}[_{type[1]}]_m
+    doc: Up convert long (top)
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f64, f32]]
+    assert_instr: [fcvtlt]
+    compose:
+      - LLVMLink: { name: "fcvtlt.{type[0]}{type[1]}" }
+
+  - name: svcvtlt_{type[0]}[_{type[1]}]_x
+    doc: Up convert long (top)
+    arguments: ["pg: svbool_t", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f64, f32]]
+    assert_instr: [fcvtlt]
+    compose:
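+      # The _x form leaves inactive elements unspecified, so it reuses the _m form with op
+      # (reinterpreted to the result type) standing in as the inactive value.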
+      - FnCall:
+        - "svcvtlt_{type[0]}_{type[1]}_m"
+        - - FnCall: ["simd_reinterpret", [$op]]
+          - $pg
+          - $op
+
+  - name: svcvtnt_{type[0]}[_{type[1]}]{_mx}
+    doc: Down convert and narrow (top)
+    arguments:
+      ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, f64]]
+    assert_instr: [fcvtnt]
+    compose:
+      - LLVMLink: { name: "fcvtnt.{type[0]}{type[1]}" }
+
+  - name: svcvtx_{type[0]}[_{type[1]}]{_mxz}
+    doc: Down convert, rounding to odd
+    arguments:
+      ["inactive: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, f64]]
+    zeroing_method: { drop: inactive }
+    assert_instr: [fcvtx]
+    compose:
+      - LLVMLink: { name: "fcvtx.{type[0]}{type[1]}" }
+
+  - name: svcvtxnt_{type[0]}[_{type[1]}]{_mx}
+    doc: Down convert, rounding to odd (top)
+    arguments:
+      ["even: {sve_type[0]}", "pg: {max_predicate}", "op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[f32, f64]]
+    assert_instr: [fcvtxnt]
+    compose:
+      - LLVMLink: { name: "fcvtxnt.{type[0]}{type[1]}" }
+
+  - name: svldnt1_gather_[{type[0]}]index[_{type[1]}]
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ldnt1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink: { name: "ldnt1.gather.index.{sve_type[1]}" }
+
+  - name: svldnt1_gather_[{type[0]}]offset[_{type[1]}]
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[1]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["ldnt1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink: { name: "ldnt1.gather.uxtw.{sve_type[1]}" }
+            doubleword:
+              LLVMLink: { name: "ldnt1.gather.{sve_type[1]}" }
+
+  - name: svldnt1_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldnt1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - LLVMLink:
+          name: "ldnt1.gather.scalar.offset.{sve_type[1]}.{sve_type[0]}"
+
+  - name: svldnt1_gather[_{type[0]}base]_{type[1]}
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldnt1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
+      - FnCall:
+          - "svldnt1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldnt1_gather[_{type[0]}base]_index_{type[1]}
+    doc: Unextended load, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["ldnt1{size_literal[0]}"]
+    test: { load: 1 }
+    compose:
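+      # The element index is converted to a byte offset by shifting left by log2(element size in bytes).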
+      - FnCall:
+          - "svldnt1_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]]
+
+  - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i64, u64], [i64, u64], [i16, i32]]
+    assert_instr: ["ldnt1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
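+      # The LLVM intrinsic loads {size[2]}-bit elements; simd_cast then sign-extends them to the full
+      # destination element width.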
+      - LLVMLink:
+          name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+
+  - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "indices: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [[i64, u64], [u64, i64], [u16, u32]]
+    assert_instr: ["ldnt1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ldnt1.gather.index.{sve_type[1] as {type[2]}}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $indices]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldnt1s{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [[i64, u64], [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldnt1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldnt1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+
+  - name: svldnt1u{size_literal[2]}_gather_[{type[0]}]offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "base: *{type[2]}", "offsets: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [u32, i32], [u8, u16]]
+      - [[i64, u64], [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ldnt1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "ldnt1.gather.uxtw.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+            doubleword:
+              LLVMLink:
+                name: "ldnt1.gather.{sve_type[1] as {type[2]}}"
+                return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $base, $offsets]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldnt1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+
+  - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_offset_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "offset: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [u32, i32], [u8, u16]]
+      - [u64, [u64, i64], [u8, u16, u32]]
+    assert_instr: ["ldnt1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - LLVMLink:
+          name: "ldnt1.gather.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          return_type: "{sve_type[1] as {type[2]}}"
+      - FnCall:
+          - simd_cast
+          - - FnCall: ["{llvm_link}", [$pg, $bases, $offset]]
+          - - Type: "{sve_type[1] as {type[2]}}"
+            - _
+
+  - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["ldnt1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [u8, u16]]
+      - [u64, [i64, u64], [u8, u16, u32]]
+    assert_instr: ["ldnt1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+
+  - name: svldnt1s{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and sign-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], u16]
+      - [u64, [i64, u64], [u16, u32]]
+    assert_instr: ["ldnt1s{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldnt1s{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svldnt1u{size_literal[2]}_gather[_{type[0]}base]_index_{type[1]}
+    doc: Load {size[2]}-bit data and zero-extend, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments: ["pg: {predicate[0]}", "bases: {sve_type[0]}", "index: i64"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [u32, [i32, u32], u16]
+      - [u64, [i64, u64], [u16, u32]]
+    assert_instr: ["ldnt1{size_literal[2]}"]
+    test: { load: 2 }
+    compose:
+      - FnCall:
+          - "svldnt1u{size_literal[2]}_gather_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+
+  - name: svstnt1_scatter_[{type[0]}]index[_{type[1]}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[1]}"
+      - "indices: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["stnt1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
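+      # The LLVM scatter intrinsics take the data operand first, so the ACLE argument order is
+      # rearranged in the call below.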
+      - LLVMLink:
+          name: "stnt1.scatter.index.{sve_type[1]}"
+          arguments:
+            - "data: {sve_type[1]}"
+            - "pg: {predicate[0]}"
+            - "base: *mut {type[1]}"
+            - "indices: {sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base, $indices]]
+
+  - name: svstnt1_scatter_[{type[0]}]offset[_{type[1]}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[1]}"
+      - "offsets: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [[i64, u64], [f64, i64, u64]]
+    assert_instr: ["stnt1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "stnt1.scatter.uxtw.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "offsets: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "stnt1.scatter.{sve_type[1]}"
+                arguments:
+                  - "data: {sve_type[1]}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[1]}"
+                  - "offsets: {sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $base, $offsets]]
+
+  - name: svstnt1_scatter[_{type[0]}base]_offset[_{type[1]}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "offset: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["stnt1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - LLVMLink:
+          arguments:
+            - "data: {sve_type[1]}"
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "offset: i64"
+          name: "stnt1.scatter.scalar.offset.{sve_type[1]}.{sve_type[0]}"
+      - FnCall: ["{llvm_link}", [$data, $pg, $bases, $offset]]
+
+  - name: svstnt1_scatter[_{type[0]}base_{type[1]}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"]
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["stnt1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
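+      # The plain base form is the base-plus-offset form with an offset of zero.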
+      - FnCall:
+          - "svstnt1_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+            - $data
+
+  - name: svstnt1_scatter[_{type[0]}base]_index[_{type[1]}]
+    doc: Non-truncating store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "index: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [f32, i32, u32]]
+      - [u64, [f64, i64, u64]]
+    assert_instr: ["stnt1{size_literal[0]}"]
+    test: { store: 1 }
+    compose:
+      - FnCall:
+          - "svstnt1_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[0]}"]]
+            - $data
+
+  - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]index[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[2]}"
+      - "indices: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [[i64, u64], i64, [i16, i32]]
+      - [[i64, u64], u64, [u16, u32]]
+    assert_instr: ["stnt1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - LLVMLink:
+          name: "stnt1.scatter.index.{sve_type[1] as {type[2]}}"
+          arguments:
+            - "data: {sve_type[1] as {type[2]}}"
+            - "pg: {predicate[0]}"
+            - "base: *mut {type[2]}"
+            - "indices: {sve_type[0]}"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $base, $indices]
+
+  - name: svstnt1{size_literal[2]}_scatter_[{type[0]}]offset[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "base: *mut {type[2]}"
+      - "offsets: {sve_type[0]}"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, i32, [i8, i16]]
+      - [u32, u32, [u8, u16]]
+      - [[i64, u64], i64, [i8, i16, i32]]
+      - [[i64, u64], u64, [u8, u16, u32]]
+    assert_instr: ["stnt1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - MatchSize:
+          - "{type[0]}"
+          - default:
+              LLVMLink:
+                name: "stnt1.scatter.uxtw.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "offsets: {sve_type[0]}"
+            doubleword:
+              LLVMLink:
+                name: "stnt1.scatter.{sve_type[1] as {type[2]}}"
+                arguments:
+                  - "data: {sve_type[1] as {type[2]}}"
+                  - "pg: {predicate[0]}"
+                  - "base: *mut {type[2]}"
+                  - "offsets: {sve_type[0]}"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $base, $offsets]
+
+  - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_offset[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "offset: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["stnt1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - LLVMLink:
+          name: "stnt1.scatter.scalar.offset.{sve_type[1] as {type[2]}}.{sve_type[0]}"
+          arguments:
+            - "data: {sve_type[1] as {type[2]}}"
+            - "pg: {predicate[0]}"
+            - "bases: {sve_type[0]}"
+            - "offset: i64"
+      - FnCall:
+          - "{llvm_link}"
+          - [FnCall: ["simd_cast", [$data]], $pg, $bases, $offset]
+
+  - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base_{type[1]}]
+    doc: Truncate to {size[2]} bits and store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      ["pg: {predicate[0]}", "bases: {sve_type[0]}", "data: {sve_type[1]}"]
+    types:
+      - [u32, [i32, u32], [i8, i16]]
+      - [u64, [i64, u64], [i8, i16, i32]]
+    assert_instr: ["stnt1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - FnCall:
+          - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - 0
+            - $data
+
+  - name: svstnt1{size_literal[2]}_scatter[_{type[0]}base]_index[_{type[1]}]
+    doc: Truncate to {size[2]} bits and store, non-temporal
+    safety:
+      unsafe:
+        - pointer_offset: predicated
+        - dereference: predicated
+        - no_provenance: bases
+        - non_temporal
+    arguments:
+      - "pg: {predicate[0]}"
+      - "bases: {sve_type[0]}"
+      - "index: i64"
+      - "data: {sve_type[1]}"
+    types:
+      - [u32, [i32, u32], i16]
+      - [u64, [i64, u64], [i16, i32]]
+    assert_instr: ["stnt1{size_literal[2]}"]
+    test: { store: 2 }
+    compose:
+      - FnCall:
+          - "svstnt1{size_literal[2]}_scatter_{type[0]}base_offset_{type[1]}"
+          - - $pg
+            - $bases
+            - MethodCall: [$index, unchecked_shl, ["{size_in_bytes_log2[2]}"]]
+            - $data
+
+  - name: svaba[{_n}_{type}]
+    doc: Absolute difference and accumulate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind}aba"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind}aba.{sve_type}" }
+
+  - name: svqabs[_{type}]{_mxz}
+    doc: Saturating absolute value
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqabs]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "sqabs.{sve_type}" }
+
+  - name: svabdlb[{_n}_{type[0]}]
+    doc: Absolute difference long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}abdlb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}abdlb.{sve_type[0]}" }
+
+  - name: svabdlt[{_n}_{type[0]}]
+    doc: Absolute difference long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}abdlt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}abdlt.{sve_type[0]}" }
+
+  - name: svabalb[{_n}_{type[0]}]
+    doc: Absolute difference and accumulate long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}abalb"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}abalb.{sve_type[0]}" }
+
+  - name: svabalt[{_n}_{type[0]}]
+    doc: Absolute difference and accumulate long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}abalt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}abalt.{sve_type[0]}" }
+
+  - name: svbcax[{_n}_{type}]
+    doc: Bitwise clear and exclusive OR
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [bcax]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "bcax.{sve_type}" }
+
+  - name: sveorbt[{_n}_{type}]
+    doc: Interleaving exclusive OR (bottom, top)
+    arguments: ["odd: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [eorbt]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "eorbt.{sve_type}" }
+
+  - name: sveortb[{_n}_{type}]
+    doc: Interleaving exclusive OR (top, bottom)
+    arguments: ["even: {sve_type}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [eortb]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "eortb.{sve_type}" }
+
+  - name: sveor3[{_n}_{type}]
+    doc: Bitwise exclusive OR of three vectors
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [eor3]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "eor3.{sve_type}" }
+
+  - name: svbsl[{_n}_{type}]
+    doc: Bitwise select
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [bsl]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "bsl.{sve_type}" }
+
+  - name: svbsl1n[{_n}_{type}]
+    doc: Bitwise select with first input inverted
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [bsl1n]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "bsl1n.{sve_type}" }
+
+  - name: svbsl2n[{_n}_{type}]
+    doc: Bitwise select with second input inverted
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [bsl2n]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "bsl2n.{sve_type}" }
+
+  - name: svnbsl[{_n}_{type}]
+    doc: Bitwise select with inverted result
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [nbsl]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "nbsl.{sve_type}" }
+
+  - name: svxar[_n_{type}]
+    doc: Bitwise exclusive OR and rotate right
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: ["1", "{size}"] }]
+    assert_instr: [[xar, "IMM3 = 1"]]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    compose:
+      - LLVMLink:
+          name: "xar.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
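+  # Illustrative sketch only (not part of the spec): assuming the usual const-generic
+  # lowering of IMM3, the generated intrinsic for i8 would be called roughly as
+  # `svxar_n_s8::<1>(op1, op2)`.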
+
+  - name: svrax1[_{type}]
+    doc: Bitwise rotate left by 1 and exclusive OR
+    target_features: [sve2-sha3]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    assert_instr: [rax1]
+    types: [i64, u64]
+    compose:
+      - LLVMLink: { name: "rax1" }
+
+  - name: svshllb[_n_{type[0]}]
+    doc: Shift left long (bottom)
+    arguments: ["op1: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}shllb", "IMM2 = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}shllb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[1]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svshllt[_n_{type[0]}]
+    doc: Shift left long (top)
+    arguments: ["op1: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}shllt", "IMM2 = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}shllt.{sve_type[0]}"
+          arguments: ["op1: {sve_type[1]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svrshl[{_n}_{type[0]}]{_mxz}
+    doc: Rounding shift left
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, u8], i8]
+      - [[i16, u16], i16]
+      - [[i32, u32], i32]
+      - [[i64, u64], i64]
+    assert_instr: ["{type_kind[0].su}rshl"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}rshl.{sve_type[0]}" }
+
+  - name: svqrshl[{_n}_{type[0]}]{_mxz}
+    doc: Saturating rounding shift left
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, u8], i8]
+      - [[i16, u16], i16]
+      - [[i32, u32], i32]
+      - [[i64, u64], i64]
+    assert_instr: ["{type_kind[0].su}qrshl"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qrshl.{sve_type[0]}" }
+
+  - name: svqshl[{_n}_{type[0]}]{_mxz}
+    doc: Saturating shift left
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [[i8, u8], i8]
+      - [[i16, u16], i16]
+      - [[i32, u32], i32]
+      - [[i64, u64], i64]
+    assert_instr: ["{type_kind[0].su}qshl"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qshl.{sve_type[0]}" }
+
+  - name: svqshlu[_n_{type[0]}]{_mxz}
+    doc: Saturating shift left unsigned
+    arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["0", "{size_minus_one[1]}"] }]
+    types:
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+    assert_instr: [[sqshlu, "IMM2 = 0"]]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink:
+          name: "sqshlu.{sve_type[0]}"
+          arguments: ["pg: {predicate[0]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]]
+
+  - name: svsli[_n_{type}]
+    doc: Shift left and insert
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: ["0", "{size_minus_one}"] }]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [[sli, "IMM3 = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sli.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
+
+  - name: svrshr[_n_{type}]{_mxz}
+    doc: Rounding shift right
+    arguments: ["pg: {predicate}", "op1: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM2: i32"]
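+    # Right-shift immediates range from 1 up to the element size in bits.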
+    constraints: [{ variable: IMM2, range: ["1", "{size}"] }]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [["{type_kind.su}rshr", "IMM2 = 1"]]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink:
+          name: "{type_kind.su}rshr.{sve_type}"
+          arguments: ["pg: {predicate}", "op1: {sve_type}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$pg, $op1, $IMM2]]
+
+  - name: svrsra[_n_{type}]
+    doc: Rounding shift right and accumulate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: ["1", "{size}"] }]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [["{type_kind.su}rsra", "IMM3 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind.su}rsra.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
+
+  - name: svrshrnb[_n_{type[0]}]
+    doc: Rounding shift right narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [[rshrnb, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "rshrnb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svrshrnt[_n_{type[0]}]
+    doc: Rounding shift right narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [[rshrnt, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "rshrnt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svqrshrnb[_n_{type[0]}]
+    doc: Saturating rounding shift right narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}qrshrnb", "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qrshrnb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svqrshrnt[_n_{type[0]}]
+    doc: Saturating rounding shift right narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}qrshrnt", "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qrshrnt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svqrshrunb[_n_{type[0]}]
+    doc: Saturating rounding shift right unsigned narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [[sqrshrunb, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "sqrshrunb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svqrshrunt[_n_{type[0]}]
+    doc: Saturating rounding shift right unsigned narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [[sqrshrunt, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "sqrshrunt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svqshrnb[_n_{type[0]}]
+    doc: Saturating shift right narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}qshrnb", "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qshrnb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svqshrnt[_n_{type[0]}]
+    doc: Saturating shift right narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [["{type_kind[0].su}qshrnt", "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}qshrnt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svqshrunb[_n_{type[0]}]
+    doc: Saturating shift right unsigned narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [[sqshrunb, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "sqshrunb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svqshrunt[_n_{type[0]}]
+    doc: Saturating shift right unsigned narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [[sqshrunt, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "sqshrunt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svsra[_n_{type}]
+    doc: Shift right and accumulate
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: ["1", "{size}"] }]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [["{type_kind.su}sra", "IMM3 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind.su}sra.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
+
+  - name: svsri[_n_{type}]
+    doc: Shift right and insert
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM3: i32"]
+    constraints: [{ variable: IMM3, range: ["1", "{size}"] }]
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: [[sri, "IMM3 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "sri.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm3: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM3]]
+
+  - name: svshrnb[_n_{type[0]}]
+    doc: Shift right narrow (bottom)
+    arguments: ["op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [[shrnb, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "shrnb.{sve_type[0]}"
+          arguments: ["op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $IMM2]]
+
+  - name: svshrnt[_n_{type[0]}]
+    doc: Shift right narrow (top)
+    arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    static_defs: ["const IMM2: i32"]
+    constraints: [{ variable: IMM2, range: ["1", "{size[1]}"] }]
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: [[shrnt, "IMM2 = 1"]]
+    compose:
+      - LLVMLink:
+          name: "shrnt.{sve_type[0]}"
+          arguments: ["even: {sve_type[1]}", "op1: {sve_type[0]}", "imm2: i32"]
+      - FnCall: ["{llvm_link}", [$even, $op1, $IMM2]]
+
+  - name: svqxtnb[_{type[0]}]
+    doc: Saturating extract narrow (bottom)
+    arguments: ["op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}qxtnb"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qxtnb.{sve_type[0]}" }
+
+  - name: svqxtnt[_{type[0]}]
+    doc: Saturating extract narrow (top)
+    arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}qxtnt"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}qxtnt.{sve_type[0]}" }
+
+  - name: svqxtunb[_{type[0]}]
+    doc: Saturating extract unsigned narrow (bottom)
+    arguments: ["op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [sqxtunb]
+    compose:
+      - LLVMLink: { name: "sqxtunb.{sve_type[0]}" }
+
+  - name: svqxtunt[_{type[0]}]
+    doc: Saturating extract unsigned narrow (top)
+    arguments: ["even: {sve_type[1]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types:
+      - [i16, u8]
+      - [i32, u16]
+      - [i64, u32]
+    assert_instr: [sqxtunt]
+    compose:
+      - LLVMLink: { name: "sqxtunt.{sve_type[0]}" }
+
+  - name: svmovlb[_{type[0]}]
+    doc: Move long (bottom)
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}shllb"]
+    compose:
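+      # Move long is a shift-left-long by zero; the trailing [0] passes the constant shift amount.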
+      - FnCall: ["svshllb_n_{type[0]}", [$op], [0]]
+
+  - name: svmovlt[_{type[0]}]
+    doc: Move long (top)
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}shllt"]
+    compose:
+      - FnCall: ["svshllt_n_{type[0]}", [$op], [0]]
+
+  - name: svunpkhi[_{type[0]}]
+    doc: Unpack and extend high half
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}unpkhi"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}unpkhi.{sve_type[0]}" }
+
+  - name: svunpkhi[_b]
+    doc: Unpack and extend high half
+    arguments: ["op: svbool_t"]
+    return_type: "svbool8_t"
+    assert_instr: [punpkhi]
+    compose:
+      - LLVMLink: { name: "punpkhi.nxv16i1" }
+
+  - name: svunpklo[_{type[0]}]
+    doc: Unpack and extend low half
+    arguments: ["op: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}unpklo"]
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}unpklo.{sve_type[0]}" }
+
+  - name: svunpklo[_b]
+    doc: Unpack and extend low half
+    arguments: ["op: svbool_t"]
+    return_type: "svbool8_t"
+    assert_instr: [punpklo]
+    compose:
+      - LLVMLink: { name: "punpklo.nxv16i1" }
+
+  - name: svaddp[_{type}]{_mx}
+    doc: Add pairwise
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.f}addp"]
+    compose:
+      - LLVMLink: { name: "{type_kind.f}addp.{sve_type}" }
+
+  - name: svadalp[_{type[0]}]{_mxz}
+    doc: Add and accumulate long pairwise
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}adalp"]
+    zeroing_method: { select: op1 }
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}adalp.{sve_type[0]}" }
+
+  - name: svmaxp[_{type}]{_mx}
+    doc: Maximum pairwise
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.fsu}maxp"]
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}maxp.{sve_type}" }
+
+  - name: svmaxnmp[_{type}]{_mx}
+    doc: Maximum number pairwise
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: ["fmaxnmp"]
+    compose:
+      - LLVMLink: { name: "fmaxnmp.{sve_type}" }
+
+  - name: svminp[_{type}]{_mx}
+    doc: Minimum pairwise
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.fsu}minp"]
+    compose:
+      - LLVMLink: { name: "{type_kind.fsu}minp.{sve_type}" }
+
+  - name: svminnmp[_{type}]{_mx}
+    doc: Minimum number pairwise
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [f32, f64]
+    assert_instr: ["fminnmp"]
+    compose:
+      - LLVMLink: { name: "fminnmp.{sve_type}" }
+
+  - name: svmul_lane[_{type}]
+    doc: Multiply
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    assert_instr: [["{type_kind.f}mul", "IMM_INDEX = 0"]]
+    types: [f32, f64, i16, i32, i64, u16, u32, u64]
+    compose:
+      - LLVMLink:
+          name: "{type_kind.f}mul.lane.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]]
+
+  - name: svqdmulh[{_n}_{type}]
+    doc: Saturating doubling multiply high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqdmulh]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "sqdmulh.{sve_type}" }
+
+  - name: svqdmulh_lane[_{type}]
+    doc: Saturating doubling multiply high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    assert_instr: [["sqdmulh", "IMM_INDEX = 0"]]
+    types: [i16, i32, i64]
+    compose:
+      - LLVMLink:
+          name: "sqdmulh.lane.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]]
+
+  - name: svqrdmulh[{_n}_{type}]
+    doc: Saturating rounding doubling multiply high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqrdmulh]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "sqrdmulh.{sve_type}" }
+
+  - name: svqrdmulh_lane[_{type}]
+    doc: Saturating rounding doubling multiply high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    assert_instr: [["sqrdmulh", "IMM_INDEX = 0"]]
+    types: [i16, i32, i64]
+    compose:
+      - LLVMLink:
+          name: "sqrdmulh.lane.{sve_type}"
+          arguments: ["op1: {sve_type}", "op2: {sve_type}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]]
+
+  - name: svqdmullb[{_n}_{type[0]}]
+    doc: Saturating doubling multiply long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: [sqdmullb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "sqdmullb.{sve_type[0]}" }
+
+  - name: svqdmullb_lane[_{type[0]}]
+    doc: Saturating doubling multiply long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    assert_instr: [["sqdmullb", "IMM_INDEX = 0"]]
+    types: [[i32, i16], [i64, i32]]
+    compose:
+      - LLVMLink:
+          name: "sqdmullb.lane.{sve_type[0]}"
+          arguments:
+            ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]]
+
+  - name: svqdmullt[{_n}_{type[0]}]
+    doc: Saturating doubling multiply long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: [sqdmullt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "sqdmullt.{sve_type[0]}" }
+
+  - name: svqdmullt_lane[_{type[0]}]
+    doc: Saturating doubling multiply long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    assert_instr: [["sqdmullt", "IMM_INDEX = 0"]]
+    types: [[i32, i16], [i64, i32]]
+    compose:
+      - LLVMLink:
+          name: "sqdmullt.lane.{sve_type[0]}"
+          arguments:
+            ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, IMM_INDEX]]
+
+  - name: svmullb[{_n}_{type[0]}]
+    doc: Multiply long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}mullb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mullb.{sve_type[0]}" }
+
+  - name: svmullb_lane[_{type[0]}]
+    doc: Multiply long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i32, i16]
+      - [i64, i32]
+      - [u32, u16]
+      - [u64, u32]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    assert_instr: [["{type_kind[0].su}mullb", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mullb.lane.{sve_type[0]}"
+          arguments:
+            ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]]
+
+  - name: svmullt[{_n}_{type[0]}]
+    doc: Multiply long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}mullt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mullt.{sve_type[0]}" }
+
+  - name: svmullt_lane[_{type[0]}]
+    doc: Multiply long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i32, i16]
+      - [i64, i32]
+      - [u32, u16]
+      - [u64, u32]
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    assert_instr: [["{type_kind[0].su}mullt", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mullt.lane.{sve_type[0]}"
+          arguments:
+            ["op1: {sve_type[1]}", "op2: {sve_type[1]}", "imm_index: i32"]
+      - FnCall: ["{llvm_link}", [$op1, $op2, $IMM_INDEX]]
+
+  - name: svrecpe[_{type}]{_mxz}
+    doc: Reciprocal estimate
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32]
+    assert_instr: [urecpe]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "urecpe.{sve_type}" }
+
+  - name: svrsqrte[_{type}]{_mxz}
+    doc: Reciprocal square root estimate
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32]
+    assert_instr: [ursqrte]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "ursqrte.{sve_type}" }
+
+  - name: svmla_lane[_{type}]
+    doc: Multiply-add, addend first
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [i16, i32, i64, u16, u32, u64]
+    assert_instr: [[mla, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "mla.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmls_lane[_{type}]
+    doc: Multiply-subtract, minuend first
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [i16, i32, i64, u16, u32, u64]
+    assert_instr: [[mls, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "mls.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmlalb[{_n}_{type[0]}]
+    doc: Multiply-add long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    assert_instr: ["{type_kind[0].su}mlalb"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mlalb.{sve_type[0]}" }
+
+  - name: svmlalb_lane[_{type[0]}]
+    doc: Multiply-add long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]]
+    assert_instr: [["{type_kind[0].su}mlalb", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mlalb.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmlalt[{_n}_{type[0]}]
+    doc: Multiply-add long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    assert_instr: ["{type_kind[0].su}mlalt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mlalt.{sve_type[0]}" }
+
+  - name: svmlalt_lane[_{type[0]}]
+    doc: Multiply-add long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]]
+    assert_instr: [["{type_kind[0].su}mlalt", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mlalt.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmlslb[{_n}_{type[0]}]
+    doc: Multiply-subtract long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    assert_instr: ["{type_kind[0].su}mlslb"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mlslb.{sve_type[0]}" }
+
+  - name: svmlslb_lane[_{type[0]}]
+    doc: Multiply-subtract long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]]
+    assert_instr: [["{type_kind[0].su}mlslb", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mlslb.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svmlslt[{_n}_{type[0]}]
+    doc: Multiply-subtract long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      [[i16, i8], [i32, i16], [i64, i32], [u16, u8], [u32, u16], [u64, u32]]
+    assert_instr: ["{type_kind[0].su}mlslt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}mlslt.{sve_type[0]}" }
+
+  - name: svmlslt_lane[_{type[0]}]
+    doc: Multiply-subtract long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32], [u32, u16], [u64, u32]]
+    assert_instr: [["{type_kind[0].su}mlslt", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "{type_kind[0].su}mlslt.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqrdmlah[{_n}_{type}]
+    doc: Saturating rounding doubling multiply-add high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqrdmlah]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqrdmlah.{sve_type}" }
+
+  - name: svqrdmlah_lane[_{type}]
+    doc: Saturating rounding doubling multiply-add high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [i16, i32, i64]
+    assert_instr: [[sqrdmlah, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqrdmlah.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqrdmlsh[{_n}_{type}]
+    doc: Saturating rounding doubling multiply-subtract high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqrdmlsh]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqrdmlsh.{sve_type}" }
+
+  - name: svqrdmlsh_lane[_{type}]
+    doc: Saturating rounding doubling multiply-subtract high
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type}" }]
+    types: [i16, i32, i64]
+    assert_instr: [[sqrdmlsh, "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqrdmlsh.lane.{sve_type}"
+          arguments:
+            - "op1: {sve_type}"
+            - "op2: {sve_type}"
+            - "op3: {sve_type}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqdmlalb[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-add long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlalb"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlalb.{sve_type[0]}" }
+
+  - name: svqdmlalb_lane[_{type[0]}]
+    doc: Saturating doubling multiply-add long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32]]
+    assert_instr: [["sqdmlalb", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqdmlalb.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqdmlalbt[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-add long (bottom × top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlalbt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlalbt.{sve_type[0]}" }
+
+  - name: svqdmlalt[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-add long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlalt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlalt.{sve_type[0]}" }
+
+  - name: svqdmlalt_lane[_{type[0]}]
+    doc: Saturating doubling multiply-add long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32]]
+    assert_instr: [["sqdmlalt", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqdmlalt.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqdmlslb[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-subtract long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlslb"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlslb.{sve_type[0]}" }
+
+  - name: svqdmlslb_lane[_{type[0]}]
+    doc: Saturating doubling multiply-subtract long (bottom)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32]]
+    assert_instr: [["sqdmlslb", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqdmlslb.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqdmlslbt[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-subtract long (bottom × top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlslbt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlslbt.{sve_type[0]}" }
+
+  - name: svqdmlslt[{_n}_{type[0]}]
+    doc: Saturating doubling multiply-subtract long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[i16, i8], [i32, i16], [i64, i32]]
+    assert_instr: ["sqdmlslt"]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "sqdmlslt.{sve_type[0]}" }
+
+  - name: svqdmlslt_lane[_{type[0]}]
+    doc: Saturating doubling multiply-subtract long (top)
+    arguments:
+      ["op1: {sve_type[0]}", "op2: {sve_type[1]}", "op3: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    static_defs: ["const IMM_INDEX: i32"]
+    constraints: [{ variable: IMM_INDEX, vec_max_elems_type: "{type[1]}" }]
+    types: [[i32, i16], [i64, i32]]
+    assert_instr: [["sqdmlslt", "IMM_INDEX = 0"]]
+    compose:
+      - LLVMLink:
+          name: "sqdmlslt.lane.{sve_type[0]}"
+          arguments:
+            - "op1: {sve_type[0]}"
+            - "op2: {sve_type[1]}"
+            - "op3: {sve_type[1]}"
+            - "IMM_INDEX: i32"
+      - FnCall: ["{llvm_link}", [$op1, $op2, $op3, $IMM_INDEX]]
+
+  - name: svqneg[_{type}]{_mxz}
+    doc: Saturating negate
+    arguments: ["inactive: {sve_type}", "pg: {predicate}", "op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64]
+    assert_instr: [sqneg]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "sqneg.{sve_type}" }
+
+  - name: svadclb[{_n}_{type}]
+    doc: Add with carry long (bottom)
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32, u64]
+    assert_instr: [adclb]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "adclb.{sve_type}" }
+
+  - name: svadclt[{_n}_{type}]
+    doc: Add with carry long (top)
+    arguments: ["op1: {sve_type}", "op2: {sve_type}", "op3: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32, u64]
+    assert_instr: [adclt]
+    n_variant_op: op3
+    compose:
+      - LLVMLink: { name: "adclt.{sve_type}" }
+
+  - name: svqadd[{_n}_{type}]{_mxz}
+    doc: Saturating add
+    arguments: ["pg: {predicate}", "op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [i8, i16, i32, i64, u8, u16, u32, u64]
+    assert_instr: ["{type_kind.su}qadd"]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind.su}qadd.{sve_type}" }
+
+  - name: svsqadd[{_n}_{type[0]}]{_mxz}
+    doc: Saturating add with signed addend
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [u8, i8]
+      - [u16, i16]
+      - [u32, i32]
+      - [u64, i64]
+    assert_instr: [usqadd]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "usqadd.{sve_type[0]}" }
+
+  - name: svuqadd[{_n}_{type[0]}]{_mxz}
+    doc: Saturating add with unsigned addend
+    arguments:
+      ["pg: {predicate[0]}", "op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i8, u8]
+      - [i16, u16]
+      - [i32, u32]
+      - [i64, u64]
+    assert_instr: [suqadd]
+    zeroing_method: { select: op1 }
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "suqadd.{sve_type[0]}" }
+
+  - name: svaddlb[{_n}_{type[0]}]
+    doc: Add long (bottom)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}addlb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addlb.{sve_type[0]}" }
+
+  - name: svaddlbt[{_n}_{type[0]}]
+    doc: Add long (bottom + top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+    assert_instr: ["{type_kind[0].su}addlbt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addlbt.{sve_type[0]}" }
+
+  - name: svaddlt[{_n}_{type[0]}]
+    doc: Add long (top)
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}addlt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addlt.{sve_type[0]}" }
+
+  - name: svaddwb[{_n}_{type[0]}]
+    doc: Add wide (bottom)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}addwb"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addwb.{sve_type[0]}" }
+
+  - name: svaddwt[{_n}_{type[0]}]
+    doc: Add wide (top)
+    arguments: ["op1: {sve_type[0]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types:
+      - [i16, i8]
+      - [i32, i16]
+      - [i64, i32]
+      - [u16, u8]
+      - [u32, u16]
+      - [u64, u32]
+    assert_instr: ["{type_kind[0].su}addwt"]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "{type_kind[0].su}addwt.{sve_type[0]}" }
+
+  - name: svlogb[_{type[0]}]{_mxz}
+    doc: Base 2 logarithm as integer
+    arguments:
+      ["inactive: {sve_type[1]}", "pg: {predicate[0]}", "op: {sve_type[0]}"]
+    return_type: "{sve_type[1]}"
+    types: [[f32, i32], [f64, i64]]
+    assert_instr: [flogb]
+    zeroing_method: { drop: inactive }
+    compose:
+      - LLVMLink: { name: "flogb.{sve_type[0]}" }
+
+  - name: svpmul[{_n}_{type}]
+    doc: Polynomial multiply
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8]
+    assert_instr: [pmul]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "pmul.{sve_type}" }
+
+  - name: svpmullb_pair[{_n}_{type}]
+    doc: Polynomial multiply long (bottom)
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u32, u64]
+    assert_instr: [pmullb]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "pmullb.pair.{sve_type}" }
+
+  - name: svpmullb[{_n}_{type[0]}]
+    doc: Polynomial multiply long (bottom)
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[u16, u8], [u64, u32]]
+    assert_instr: [pmullb]
+    n_variant_op: op2
+    compose:
+      - FnCall:
+          - "simd_reinterpret"
+          - [FnCall: ["svpmullb_pair_{type[1]}", [$op1, $op2]]]
+
+  - name: svpmullt_pair[{_n}_{type}]
+    doc: Polynomial multiply long (top)
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8, u32, u64]
+    assert_instr: [pmullt]
+    n_variant_op: op2
+    compose:
+      - LLVMLink: { name: "pmullt.pair.{sve_type}" }
+
+  - name: svpmullt[{_n}_{type[0]}]
+    doc: Polynomial multiply long (top)
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type[1]}", "op2: {sve_type[1]}"]
+    return_type: "{sve_type[0]}"
+    types: [[u16, u8], [u64, u32]]
+    assert_instr: [pmullt]
+    n_variant_op: op2
+    compose:
+      - FnCall:
+          - "simd_reinterpret"
+          - [FnCall: ["svpmullt_pair_{type[1]}", [$op1, $op2]]]
+
+  - name: svaesd[_{type}]
+    doc: AES single round decryption
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8]
+    assert_instr: [aesd]
+    compose:
+      - LLVMLink: { name: "aesd" }
+
+  - name: svaese[_{type}]
+    doc: AES single round encryption
+    target_features: [sve2-aes]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8]
+    assert_instr: [aese]
+    compose:
+      - LLVMLink: { name: "aese" }
+
+  - name: svaesmc[_{type}]
+    doc: AES mix columns
+    target_features: [sve2-aes]
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8]
+    assert_instr: [aesmc]
+    compose:
+      - LLVMLink: { name: "aesmc" }
+
+  - name: svaesimc[_{type}]
+    doc: AES inverse mix columns
+    target_features: [sve2-aes]
+    arguments: ["op: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u8]
+    assert_instr: [aesimc]
+    compose:
+      - LLVMLink: { name: "aesimc" }
+
+  - name: svsm4e[_{type}]
+    doc: SM4 encryption and decryption
+    target_features: [sve2-sm4]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32]
+    assert_instr: [sm4e]
+    compose:
+      - LLVMLink: { name: "sm4e" }
+
+  - name: svsm4ekey[_{type}]
+    doc: SM4 key updates
+    target_features: [sve2-sm4]
+    arguments: ["op1: {sve_type}", "op2: {sve_type}"]
+    return_type: "{sve_type}"
+    types: [u32]
+    assert_instr: [sm4ekey]
+    compose:
+      - LLVMLink: { name: "sm4ekey" }
diff --git a/crates/stdarch-gen2/src/assert_instr.rs b/crates/stdarch-gen2/src/assert_instr.rs
new file mode 100644
index 0000000000..ce1bbe8b55
--- /dev/null
+++ b/crates/stdarch-gen2/src/assert_instr.rs
@@ -0,0 +1,372 @@
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote, ToTokens, TokenStreamExt};
+use serde::de::{self, MapAccess, Visitor};
+use serde::{ser::SerializeSeq, Deserialize, Deserializer, Serialize};
+use std::fmt;
+
+use crate::{
+    context::{self, Context},
+    typekinds::{BaseType, BaseTypeKind},
+    wildstring::WildString,
+};
+
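+/// A single instruction assertion: either just an instruction name, or an
+/// instruction name paired with an expected arguments expression.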
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum InstructionAssertion {
+    Basic(WildString),
+    WithArgs(WildString, WildString),
+}
+
+impl InstructionAssertion {
+    fn build(&mut self, ctx: &Context) -> context::Result {
+        match self {
+            InstructionAssertion::Basic(ws) => ws.build_acle(ctx.local),
+            InstructionAssertion::WithArgs(ws, args_ws) => [ws, args_ws]
+                .into_iter()
+                .try_for_each(|ws| ws.build_acle(ctx.local)),
+        }
+    }
+}
+
+impl ToTokens for InstructionAssertion {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let instr = format_ident!(
+            "{}",
+            match self {
+                Self::Basic(instr) => instr,
+                Self::WithArgs(instr, _) => instr,
+            }
+            .to_string()
+        );
+        tokens.append_all(quote! { #instr });
+
+        if let Self::WithArgs(_, args) = self {
+            let ex: TokenStream = args
+                .to_string()
+                .parse()
+                .expect("invalid instruction assertion arguments expression given");
+            tokens.append_all(quote! {, #ex})
+        }
+    }
+}
+
+/// Asserts that the given instruction is present for the intrinsic of the associated type bitsize.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(remote = "Self")]
+pub struct InstructionAssertionMethodForBitsize {
+    pub default: InstructionAssertion,
+    pub byte: Option<InstructionAssertion>,
+    pub halfword: Option<InstructionAssertion>,
+    pub word: Option<InstructionAssertion>,
+    pub doubleword: Option<InstructionAssertion>,
+}
+
+impl InstructionAssertionMethodForBitsize {
+    fn build(&mut self, ctx: &Context) -> context::Result {
+        if let Some(ref mut byte) = self.byte {
+            byte.build(ctx)?
+        }
+        if let Some(ref mut halfword) = self.halfword {
+            halfword.build(ctx)?
+        }
+        if let Some(ref mut word) = self.word {
+            word.build(ctx)?
+        }
+        if let Some(ref mut doubleword) = self.doubleword {
+            doubleword.build(ctx)?
+        }
+        self.default.build(ctx)
+    }
+}
+
+impl Serialize for InstructionAssertionMethodForBitsize {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            InstructionAssertionMethodForBitsize {
+                default: InstructionAssertion::Basic(instr),
+                byte: None,
+                halfword: None,
+                word: None,
+                doubleword: None,
+            } => serializer.serialize_str(&instr.to_string()),
+            InstructionAssertionMethodForBitsize {
+                default: InstructionAssertion::WithArgs(instr, args),
+                byte: None,
+                halfword: None,
+                word: None,
+                doubleword: None,
+            } => {
+                let mut seq = serializer.serialize_seq(Some(2))?;
+                seq.serialize_element(&instr.to_string())?;
+                seq.serialize_element(&args.to_string())?;
+                seq.end()
+            }
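+            // Delegate the remaining cases to the `#[serde(remote = "Self")]`-derived
+            // function rather than recursing into this manual impl.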
+            _ => InstructionAssertionMethodForBitsize::serialize(self, serializer),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for InstructionAssertionMethodForBitsize {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct IAMVisitor;
+
+        impl<'de> Visitor<'de> for IAMVisitor {
+            type Value = InstructionAssertionMethodForBitsize;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("array, string or map")
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<InstructionAssertionMethodForBitsize, E>
+            where
+                E: de::Error,
+            {
+                Ok(InstructionAssertionMethodForBitsize {
+                    default: InstructionAssertion::Basic(value.parse().map_err(E::custom)?),
+                    byte: None,
+                    halfword: None,
+                    word: None,
+                    doubleword: None,
+                })
+            }
+
+            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+            where
+                A: de::SeqAccess<'de>,
+            {
+                use serde::de::Error;
+                let make_err =
+                    || Error::custom("invalid number of arguments passed to assert_instr");
+                let instruction = seq.next_element()?.ok_or_else(make_err)?;
+                let args = seq.next_element()?.ok_or_else(make_err)?;
+
+                if let Some(true) = seq.size_hint().map(|len| len > 0) {
+                    Err(make_err())
+                } else {
+                    Ok(InstructionAssertionMethodForBitsize {
+                        default: InstructionAssertion::WithArgs(instruction, args),
+                        byte: None,
+                        halfword: None,
+                        word: None,
+                        doubleword: None,
+                    })
+                }
+            }
+
+            fn visit_map<M>(self, map: M) -> Result<InstructionAssertionMethodForBitsize, M::Error>
+            where
+                M: MapAccess<'de>,
+            {
+                InstructionAssertionMethodForBitsize::deserialize(
+                    de::value::MapAccessDeserializer::new(map),
+                )
+            }
+        }
+
+        deserializer.deserialize_any(IAMVisitor)
+    }
+}
+
+/// Asserts that the given instruction is present for the intrinsic of the associated type.
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(remote = "Self")]
+pub struct InstructionAssertionMethod {
+    /// Default instruction, used for signed integer intrinsics and as a fallback when
+    /// no more specific variant is given
+    pub default: InstructionAssertionMethodForBitsize,
+    /// Instruction for floating-point intrinsics (optional)
+    #[serde(default)]
+    pub float: Option<InstructionAssertionMethodForBitsize>,
+    /// Instruction for unsigned integer intrinsics (optional)
+    #[serde(default)]
+    pub unsigned: Option<InstructionAssertionMethodForBitsize>,
+}
+
+impl InstructionAssertionMethod {
+    pub(crate) fn build(&mut self, ctx: &Context) -> context::Result {
+        if let Some(ref mut float) = self.float {
+            float.build(ctx)?
+        }
+        if let Some(ref mut unsigned) = self.unsigned {
+            unsigned.build(ctx)?
+        }
+        self.default.build(ctx)
+    }
+}
+
+impl Serialize for InstructionAssertionMethod {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            InstructionAssertionMethod {
+                default:
+                    InstructionAssertionMethodForBitsize {
+                        default: InstructionAssertion::Basic(instr),
+                        byte: None,
+                        halfword: None,
+                        word: None,
+                        doubleword: None,
+                    },
+                float: None,
+                unsigned: None,
+            } => serializer.serialize_str(&instr.to_string()),
+            InstructionAssertionMethod {
+                default:
+                    InstructionAssertionMethodForBitsize {
+                        default: InstructionAssertion::WithArgs(instr, args),
+                        byte: None,
+                        halfword: None,
+                        word: None,
+                        doubleword: None,
+                    },
+                float: None,
+                unsigned: None,
+            } => {
+                let mut seq = serializer.serialize_seq(Some(2))?;
+                seq.serialize_element(&instr.to_string())?;
+                seq.serialize_element(&args.to_string())?;
+                seq.end()
+            }
+            _ => InstructionAssertionMethod::serialize(self, serializer),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for InstructionAssertionMethod {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct IAMVisitor;
+
+        impl<'de> Visitor<'de> for IAMVisitor {
+            type Value = InstructionAssertionMethod;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("array, string or map")
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<InstructionAssertionMethod, E>
+            where
+                E: de::Error,
+            {
+                Ok(InstructionAssertionMethod {
+                    default: InstructionAssertionMethodForBitsize {
+                        default: InstructionAssertion::Basic(value.parse().map_err(E::custom)?),
+                        byte: None,
+                        halfword: None,
+                        word: None,
+                        doubleword: None,
+                    },
+                    float: None,
+                    unsigned: None,
+                })
+            }
+
+            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+            where
+                A: de::SeqAccess<'de>,
+            {
+                use serde::de::Error;
+                let make_err =
+                    || Error::custom("invalid number of arguments passed to assert_instr");
+                let instruction = seq.next_element()?.ok_or_else(make_err)?;
+                let args = seq.next_element()?.ok_or_else(make_err)?;
+
+                if let Some(true) = seq.size_hint().map(|len| len > 0) {
+                    Err(make_err())
+                } else {
+                    Ok(InstructionAssertionMethod {
+                        default: InstructionAssertionMethodForBitsize {
+                            default: InstructionAssertion::WithArgs(instruction, args),
+                            byte: None,
+                            halfword: None,
+                            word: None,
+                            doubleword: None,
+                        },
+                        float: None,
+                        unsigned: None,
+                    })
+                }
+            }
+
+            fn visit_map<M>(self, map: M) -> Result<InstructionAssertionMethod, M::Error>
+            where
+                M: MapAccess<'de>,
+            {
+                InstructionAssertionMethod::deserialize(de::value::MapAccessDeserializer::new(map))
+            }
+        }
+
+        deserializer.deserialize_any(IAMVisitor)
+    }
+}
+
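+/// Emits `#[cfg_attr(test, assert_instr(..))]` attributes for a list of assertion
+/// methods, picking the variant that matches the given base type's kind and bit size.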
+#[derive(Debug)]
+pub struct InstructionAssertionsForBaseType<'a>(
+    pub &'a Vec<InstructionAssertionMethod>,
+    pub &'a Option<&'a BaseType>,
+);
+
+impl<'a> ToTokens for InstructionAssertionsForBaseType<'a> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.0.iter().for_each(
+            |InstructionAssertionMethod {
+                 default,
+                 float,
+                 unsigned,
+             }| {
+                let kind = self.1.map(|ty| ty.kind());
+                let instruction = match (kind, float, unsigned) {
+                    (None, float, unsigned) if float.is_some() || unsigned.is_some() => {
+                        unreachable!(
+                        "cannot determine the base type kind for instruction assertion: {self:#?}")
+                    }
+                    (Some(BaseTypeKind::Float), Some(float), _) => float,
+                    (Some(BaseTypeKind::UInt), _, Some(unsigned)) => unsigned,
+                    _ => default,
+                };
+
+                let bitsize = self.1.and_then(|ty| ty.get_size().ok());
+                let instruction = match (bitsize, instruction) {
+                    (
+                        Some(8),
+                        InstructionAssertionMethodForBitsize {
+                            byte: Some(byte), ..
+                        },
+                    ) => byte,
+                    (
+                        Some(16),
+                        InstructionAssertionMethodForBitsize {
+                            halfword: Some(halfword),
+                            ..
+                        },
+                    ) => halfword,
+                    (
+                        Some(32),
+                        InstructionAssertionMethodForBitsize {
+                            word: Some(word), ..
+                        },
+                    ) => word,
+                    (
+                        Some(64),
+                        InstructionAssertionMethodForBitsize {
+                            doubleword: Some(doubleword),
+                            ..
+                        },
+                    ) => doubleword,
+                    (_, InstructionAssertionMethodForBitsize { default, .. }) => default,
+                };
+
+                tokens.append_all(quote! { #[cfg_attr(test, assert_instr(#instruction))]})
+            },
+        );
+    }
+}
diff --git a/crates/stdarch-gen2/src/context.rs b/crates/stdarch-gen2/src/context.rs
new file mode 100644
index 0000000000..108f7ab706
--- /dev/null
+++ b/crates/stdarch-gen2/src/context.rs
@@ -0,0 +1,249 @@
+use itertools::Itertools;
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+
+use crate::{
+    expression::Expression,
+    input::{InputSet, InputType},
+    intrinsic::{Constraint, Intrinsic, Signature},
+    matching::SizeMatchable,
+    predicate_forms::PredicateForm,
+    typekinds::{ToRepr, TypeKind},
+    wildcards::Wildcard,
+    wildstring::WildString,
+};
+
+/// Maximum SVE vector size, in bits
+const SVE_VECTOR_MAX_SIZE: u32 = 2048;
+/// Vector register size, in bits
+const VECTOR_REG_SIZE: u32 = 128;
+
+/// Generator result
+pub type Result<T = ()> = std::result::Result<T, String>;
+
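+/// Per-architecture code generation settings: the architecture name, its target
+/// features and the prefix used for LLVM intrinsic links.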
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct ArchitectureSettings {
+    #[serde(alias = "arch")]
+    pub arch_name: String,
+    pub target_feature: Vec<String>,
+    #[serde(alias = "llvm_prefix")]
+    pub llvm_link_prefix: String,
+}
+
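+/// Settings shared by every intrinsic in an input file.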
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GlobalContext {
+    pub arch_cfgs: Vec<ArchitectureSettings>,
+    #[serde(default)]
+    pub uses_neon_types: bool,
+}
+
+/// Context of an intrinsic group
+#[derive(Debug, Clone, Default)]
+pub struct GroupContext {
+    /// LLVM links to target input sets
+    pub links: HashMap<String, InputSet>,
+}
+
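+/// Records whether a tracked variable is a function argument or was introduced
+/// internally (e.g. by a `Let` expression).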
+#[derive(Debug, Clone, Copy)]
+pub enum VariableType {
+    Argument,
+    Internal,
+}
+
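+/// Per-variant context: the intrinsic's signature, the concrete input set,
+/// the wildcard substitutions and the variables tracked so far.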
+#[derive(Debug, Clone)]
+pub struct LocalContext {
+    pub signature: Signature,
+
+    pub input: InputSet,
+
+    pub substitutions: HashMap<Wildcard, String>,
+    pub variables: HashMap<String, (TypeKind, VariableType)>,
+}
+
+impl LocalContext {
+    pub fn new(input: InputSet, original: &Intrinsic) -> LocalContext {
+        LocalContext {
+            signature: original.signature.clone(),
+            input,
+            substitutions: HashMap::new(),
+            variables: HashMap::new(),
+        }
+    }
+
+    pub fn provide_type_wildcard(&self, wildcard: &Wildcard) -> Result<TypeKind> {
+        let err = || format!("wildcard {{{wildcard}}} not found");
+
+        let make_neon = |tuple_size| move |ty| TypeKind::make_vector(ty, false, tuple_size);
+        let make_sve = |tuple_size| move |ty| TypeKind::make_vector(ty, true, tuple_size);
+
+        match wildcard {
+            Wildcard::Type(idx) => self.input.typekind(*idx).ok_or_else(err),
+            Wildcard::NEONType(idx, tuple_size) => self
+                .input
+                .typekind(*idx)
+                .ok_or_else(err)
+                .and_then(make_neon(*tuple_size)),
+            Wildcard::SVEType(idx, tuple_size) => self
+                .input
+                .typekind(*idx)
+                .ok_or_else(err)
+                .and_then(make_sve(*tuple_size)),
+            Wildcard::Predicate(idx) => self.input.typekind(*idx).map_or_else(
+                || {
+                    if idx.is_none() && self.input.types_len() == 1 {
+                        Err(err())
+                    } else {
+                        Err(format!(
+                            "there is no type at index {} to infer the predicate from",
+                            idx.unwrap_or(0)
+                        ))
+                    }
+                },
+                |ref ty| TypeKind::make_predicate_from(ty),
+            ),
+            Wildcard::MaxPredicate => self
+                .input
+                .iter()
+                .filter_map(|arg| arg.typekind())
+                .max_by(|x, y| {
+                    x.base_type()
+                        .and_then(|bt| bt.get_size().ok())
+                        .unwrap_or(0)
+                        .cmp(&y.base_type().and_then(|bt| bt.get_size().ok()).unwrap_or(0))
+                })
+                .map_or_else(
+                    || Err("there are no types available to infer the predicate from".to_string()),
+                    TypeKind::make_predicate_from,
+                ),
+            Wildcard::Scale(w, as_ty) => {
+                let mut ty = self.provide_type_wildcard(w)?;
+                if let Some(vty) = ty.vector_mut() {
+                    let base_ty = if let Some(w) = as_ty.wildcard() {
+                        *self.provide_type_wildcard(w)?.base_type().unwrap()
+                    } else {
+                        *as_ty.base_type().unwrap()
+                    };
+                    vty.cast_base_type_as(base_ty)
+                }
+                Ok(ty)
+            }
+            _ => Err(err()),
+        }
+    }
+
+    pub fn provide_substitution_wildcard(&self, wildcard: &Wildcard) -> Result<String> {
+        let err = || Err(format!("wildcard {{{wildcard}}} not found"));
+
+        match wildcard {
+            Wildcard::SizeLiteral(idx) => self.input.typekind(*idx)
+                .map_or_else(err, |ty| Ok(ty.size_literal())),
+            Wildcard::Size(idx) => self.input.typekind(*idx)
+                .map_or_else(err, |ty| Ok(ty.size())),
+            Wildcard::SizeMinusOne(idx) => self.input.typekind(*idx)
+                .map_or_else(err, |ty| Ok((ty.size().parse::<i32>().unwrap() - 1).to_string())),
+            Wildcard::SizeInBytesLog2(idx) => self.input.typekind(*idx)
+                .map_or_else(err, |ty| Ok(ty.size_in_bytes_log2())),
+            Wildcard::NVariant if self.substitutions.get(wildcard).is_none() => Ok(String::new()),
+            Wildcard::TypeKind(idx, opts) => {
+                self.input.typekind(*idx)
+                    .map_or_else(err, |ty| {
+                        let literal = if let Some(opts) = opts {
+                            opts.contains(ty.base_type().map(|bt| *bt.kind()).ok_or_else(|| {
+                                format!("cannot retrieve a type literal out of {ty}")
+                            })?)
+                            .then(|| ty.type_kind())
+                            .unwrap_or_default()
+                        } else {
+                            ty.type_kind()
+                        };
+                        Ok(literal)
+                    })
+            }
+            Wildcard::PredicateForms(_) => self
+                .input
+                .iter()
+                .find_map(|arg| {
+                    if let InputType::PredicateForm(pf) = arg {
+                        Some(pf.get_suffix().to_string())
+                    } else {
+                        None
+                    }
+                })
+                .ok_or_else(|| unreachable!("attempting to render a predicate form wildcard, but no predicate form was compiled for it")),
+            _ => self
+                .substitutions
+                .get(wildcard)
+                .map_or_else(err, |s| Ok(s.clone())),
+        }
+    }
+
+    pub fn make_assertion_from_constraint(&self, constraint: &Constraint) -> Result<Expression> {
+        match constraint {
+            Constraint::AnyI32 {
+                variable,
+                any_values,
+            } => {
+                let where_ex = any_values
+                    .iter()
+                    .map(|value| format!("{variable} == {value}"))
+                    .join(" || ");
+                Ok(Expression::MacroCall("static_assert".to_string(), where_ex))
+            }
+            Constraint::RangeI32 {
+                variable,
+                range: SizeMatchable::Matched(range),
+            } => Ok(Expression::MacroCall(
+                "static_assert_range".to_string(),
+                format!(
+                    "{variable}, {min}, {max}",
+                    min = range.start(),
+                    max = range.end()
+                ),
+            )),
+            Constraint::SVEMaxElems {
+                variable,
+                sve_max_elems_type: ty,
+            }
+            | Constraint::VecMaxElems {
+                variable,
+                vec_max_elems_type: ty,
+            } => {
+                if !self.input.is_empty() {
+                    let higher_limit = match constraint {
+                        Constraint::SVEMaxElems { .. } => SVE_VECTOR_MAX_SIZE,
+                        Constraint::VecMaxElems { .. } => VECTOR_REG_SIZE,
+                        _ => unreachable!(),
+                    };
+
+                    let max = ty.base_type()
+                        .map(|ty| ty.get_size())
+                        .transpose()?
+                        .map_or_else(
+                            || Err(format!("can't make an assertion out of constraint {self:?}: no valid type is present")),
+                            |bitsize| Ok(higher_limit / bitsize - 1))?;
+                    Ok(Expression::MacroCall(
+                        "static_assert_range".to_string(),
+                        format!("{variable}, 0, {max}"),
+                    ))
+                } else {
+                    Err(format!("can't make an assertion out of constraint {self:?}: no types are being used"))
+                }
+            }
+            _ => unreachable!("constraints were not built successfully!"),
+        }
+    }
+
+    pub fn predicate_form(&self) -> Option<&PredicateForm> {
+        self.input.iter().find_map(|arg| arg.predicate_form())
+    }
+
+    pub fn n_variant_op(&self) -> Option<&WildString> {
+        self.input.iter().find_map(|arg| arg.n_variant_op())
+    }
+}
+
+pub struct Context<'ctx> {
+    pub local: &'ctx mut LocalContext,
+    pub group: &'ctx mut GroupContext,
+    pub global: &'ctx GlobalContext,
+}
diff --git a/crates/stdarch-gen2/src/expression.rs b/crates/stdarch-gen2/src/expression.rs
new file mode 100644
index 0000000000..4434ae276e
--- /dev/null
+++ b/crates/stdarch-gen2/src/expression.rs
@@ -0,0 +1,546 @@
+use itertools::Itertools;
+use lazy_static::lazy_static;
+use proc_macro2::{Literal, TokenStream};
+use quote::{format_ident, quote, ToTokens, TokenStreamExt};
+use regex::Regex;
+use serde::de::{self, MapAccess, Visitor};
+use serde::{Deserialize, Deserializer, Serialize};
+use std::fmt;
+use std::str::FromStr;
+
+use crate::intrinsic::Intrinsic;
+use crate::{
+    context::{self, Context, VariableType},
+    intrinsic::{Argument, LLVMLink, StaticDefinition},
+    matching::{MatchKindValues, MatchSizeValues},
+    typekinds::{BaseType, BaseTypeKind, TypeKind},
+    wildcards::Wildcard,
+    wildstring::WildString,
+};
+
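+/// Distinguishes plain symbols from scope variables (spelled with a leading `$`).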
+#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
+pub enum IdentifierType {
+    Variable,
+    Symbol,
+}
+
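+/// A `let` binding, optionally carrying an explicit type annotation.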
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum LetVariant {
+    Basic(WildString, Box<Expression>),
+    WithType(WildString, TypeKind, Box<Expression>),
+}
+
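+/// A function call, corresponding to input entries such as
+/// `FnCall: ["{llvm_link}", [$op1, $op2]]`.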
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct FnCall(
+    /// Function pointer
+    pub Box<Expression>,
+    /// Function arguments
+    pub Vec<Expression>,
+    /// Function turbofish arguments
+    #[serde(default)]
+    pub Vec<Expression>,
+);
+
+impl FnCall {
+    pub fn new_expression(fn_ptr: Expression, arguments: Vec<Expression>) -> Expression {
+        FnCall(Box::new(fn_ptr), arguments, Vec::new()).into()
+    }
+
+    pub fn is_llvm_link_call(&self, llvm_link_name: &String) -> bool {
+        if let Expression::Identifier(fn_name, IdentifierType::Symbol) = self.0.as_ref() {
+            &fn_name.to_string() == llvm_link_name
+        } else {
+            false
+        }
+    }
+
+    pub fn pre_build(&mut self, ctx: &mut Context) -> context::Result {
+        self.0.pre_build(ctx)?;
+        self.1
+            .iter_mut()
+            .chain(self.2.iter_mut())
+            .try_for_each(|ex| ex.pre_build(ctx))
+    }
+
+    pub fn build(&mut self, intrinsic: &Intrinsic, ctx: &mut Context) -> context::Result {
+        self.0.build(intrinsic, ctx)?;
+        self.1
+            .iter_mut()
+            .chain(self.2.iter_mut())
+            .try_for_each(|ex| ex.build(intrinsic, ctx))
+    }
+}
+
+impl ToTokens for FnCall {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let FnCall(fn_ptr, arguments, turbofish) = self;
+
+        fn_ptr.to_tokens(tokens);
+
+        if !turbofish.is_empty() {
+            tokens.append_all(quote! {::<#(#turbofish),*>});
+        }
+
+        tokens.append_all(quote! { (#(#arguments),*) })
+    }
+}
+
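+/// An expression as it may appear in an intrinsic's `compose` list.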
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(remote = "Self", deny_unknown_fields)]
+pub enum Expression {
+    /// (Re)Defines a variable
+    Let(LetVariant),
+    /// Performs a variable assignment operation
+    Assign(String, Box<Expression>),
+    /// Performs a macro call
+    MacroCall(String, String),
+    /// Performs a function call
+    FnCall(FnCall),
+    /// Performs a method call. The following:
+    /// `MethodCall: ["$object", "to_string", []]`
+    /// is tokenized as:
+    /// `object.to_string()`.
+    MethodCall(Box<Expression>, String, Vec<Expression>),
+    /// Symbol identifier name. Prepend it with a `$` to treat it as a scope variable,
+    /// which engages variable tracking and enables inference.
+    /// E.g. `my_function_name` for a generic symbol or `$my_variable` for
+    /// a variable.
+    Identifier(WildString, IdentifierType),
+    /// Constant signed integer number expression
+    IntConstant(i32),
+    /// Constant floating point number expression
+    FloatConstant(f32),
+    /// Constant boolean expression, either `true` or `false`
+    BoolConstant(bool),
+    /// Array expression
+    Array(Vec<Expression>),
+
+    // complex expressions
+    /// Makes an LLVM link.
+    ///
+    /// It stores the link's function name in the wildcard `{llvm_link}`, for use in
+    /// subsequent expressions.
+    LLVMLink(LLVMLink),
+    /// Casts the given expression to the specified (unchecked) type
+    CastAs(Box<Expression>, String),
+    /// Returns the LLVM `undef` symbol
+    SvUndef,
+    /// Multiplication
+    Multiply(Box<Expression>, Box<Expression>),
+    /// Converts the specified constant to the specified type's kind
+    ConvertConst(TypeKind, i32),
+    /// Yields the given type in the Rust representation
+    Type(TypeKind),
+
+    MatchSize(TypeKind, MatchSizeValues<Box<Expression>>),
+    MatchKind(TypeKind, MatchKindValues<Box<Expression>>),
+}
+
+impl Expression {
+    pub fn pre_build(&mut self, ctx: &mut Context) -> context::Result {
+        match self {
+            Self::FnCall(fn_call) => fn_call.pre_build(ctx),
+            Self::MethodCall(cl_ptr_ex, _, arg_exs) => {
+                cl_ptr_ex.pre_build(ctx)?;
+                arg_exs.iter_mut().try_for_each(|ex| ex.pre_build(ctx))
+            }
+            Self::Let(LetVariant::Basic(_, ex) | LetVariant::WithType(_, _, ex)) => {
+                ex.pre_build(ctx)
+            }
+            Self::CastAs(ex, _) => ex.pre_build(ctx),
+            Self::Multiply(lhs, rhs) => {
+                lhs.pre_build(ctx)?;
+                rhs.pre_build(ctx)
+            }
+            Self::MatchSize(match_ty, values) => {
+                *self = *values.get(match_ty, ctx.local)?.to_owned();
+                self.pre_build(ctx)
+            }
+            Self::MatchKind(match_ty, values) => {
+                *self = *values.get(match_ty, ctx.local)?.to_owned();
+                self.pre_build(ctx)
+            }
+            _ => Ok(()),
+        }
+    }
+
+    pub fn build(&mut self, intrinsic: &Intrinsic, ctx: &mut Context) -> context::Result {
+        match self {
+            Self::LLVMLink(link) => link.build_and_save(ctx),
+            Self::Identifier(identifier, id_type) => {
+                identifier.build_acle(ctx.local)?;
+
+                if let IdentifierType::Variable = id_type {
+                    ctx.local
+                        .variables
+                        .get(&identifier.to_string())
+                        .map(|_| ())
+                        .ok_or_else(|| format!("invalid variable {identifier} being referenced"))
+                } else {
+                    Ok(())
+                }
+            }
+            Self::FnCall(fn_call) => {
+                fn_call.build(intrinsic, ctx)?;
+
+                if let Some(llvm_link_name) = ctx.local.substitutions.get(&Wildcard::LLVMLink) {
+                    if fn_call.is_llvm_link_call(llvm_link_name) {
+                        *self = intrinsic
+                            .llvm_link()
+                            .expect("got LLVMLink wildcard without a LLVM link in `compose`")
+                            .apply_conversions_to_call(fn_call.clone(), ctx.local)?
+                    }
+                }
+
+                Ok(())
+            }
+            Self::MethodCall(cl_ptr_ex, _, arg_exs) => {
+                cl_ptr_ex.build(intrinsic, ctx)?;
+                arg_exs
+                    .iter_mut()
+                    .try_for_each(|ex| ex.build(intrinsic, ctx))
+            }
+            Self::Let(variant) => {
+                let (var_name, ex, ty) = match variant {
+                    LetVariant::Basic(var_name, ex) => (var_name, ex, None),
+                    LetVariant::WithType(var_name, ty, ex) => {
+                        if let Some(w) = ty.wildcard() {
+                            ty.populate_wildcard(ctx.local.provide_type_wildcard(w)?)?;
+                        }
+                        (var_name, ex, Some(ty.to_owned()))
+                    }
+                };
+
+                var_name.build_acle(ctx.local)?;
+                ctx.local.variables.insert(
+                    var_name.to_string(),
+                    (
+                        ty.unwrap_or_else(|| TypeKind::Custom("unknown".to_string())),
+                        VariableType::Internal,
+                    ),
+                );
+                ex.build(intrinsic, ctx)
+            }
+            Self::CastAs(ex, _) => ex.build(intrinsic, ctx),
+            Self::Multiply(lhs, rhs) => {
+                lhs.build(intrinsic, ctx)?;
+                rhs.build(intrinsic, ctx)
+            }
+            Self::ConvertConst(ty, num) => {
+                if let Some(w) = ty.wildcard() {
+                    *ty = ctx.local.provide_type_wildcard(w)?
+                }
+
+                if let Some(BaseType::Sized(BaseTypeKind::Float, _)) = ty.base() {
+                    *self = Expression::FloatConstant(*num as f32)
+                } else {
+                    *self = Expression::IntConstant(*num)
+                }
+                Ok(())
+            }
+            Self::Type(ty) => {
+                if let Some(w) = ty.wildcard() {
+                    *ty = ctx.local.provide_type_wildcard(w)?
+                }
+
+                Ok(())
+            }
+            _ => Ok(()),
+        }
+    }
+
+    /// True if the expression requires an `unsafe` context in a safe function.
+    ///
+    /// The classification is somewhat fuzzy, based on actual usage (e.g. empirical function names)
+    /// rather than a full parse. This is a reasonable approach because mistakes here will usually
+    /// be caught at build time:
+    ///
+    ///  - Missing an `unsafe` is a build error.
+    ///  - An unnecessary `unsafe` is a warning, made into an error by the CI's `-D warnings`.
+    ///
+    /// This **panics** if it encounters an expression that shouldn't appear in a safe function at
+    /// all (such as `SvUndef`).
+    pub fn requires_unsafe_wrapper(&self, ctx_fn: &str) -> bool {
+        match self {
+            // The call will need to be unsafe, but the declaration does not.
+            Self::LLVMLink(..) => false,
+            // Identifiers, literals and type names are never unsafe.
+            Self::Identifier(..) => false,
+            Self::IntConstant(..) => false,
+            Self::FloatConstant(..) => false,
+            Self::BoolConstant(..) => false,
+            Self::Type(..) => false,
+            Self::ConvertConst(..) => false,
+            // Nested structures that aren't inherently unsafe, but could contain other expressions
+            // that might be.
+            Self::Assign(_var, exp) => exp.requires_unsafe_wrapper(ctx_fn),
+            Self::Let(LetVariant::Basic(_, exp) | LetVariant::WithType(_, _, exp)) => {
+                exp.requires_unsafe_wrapper(ctx_fn)
+            }
+            Self::Array(exps) => exps.iter().any(|exp| exp.requires_unsafe_wrapper(ctx_fn)),
+            Self::Multiply(lhs, rhs) => {
+                lhs.requires_unsafe_wrapper(ctx_fn) || rhs.requires_unsafe_wrapper(ctx_fn)
+            }
+            Self::CastAs(exp, _ty) => exp.requires_unsafe_wrapper(ctx_fn),
+            // Functions and macros can be unsafe, but can also contain other expressions.
+            Self::FnCall(FnCall(fn_exp, args, turbo_args)) => {
+                let fn_name = fn_exp.to_string();
+                fn_exp.requires_unsafe_wrapper(ctx_fn)
+                    || fn_name.starts_with("_sv")
+                    || fn_name.starts_with("simd_")
+                    || fn_name.ends_with("transmute")
+                    || args.iter().any(|exp| exp.requires_unsafe_wrapper(ctx_fn))
+                    || turbo_args
+                        .iter()
+                        .any(|exp| exp.requires_unsafe_wrapper(ctx_fn))
+            }
+            Self::MethodCall(exp, fn_name, args) => match fn_name.as_str() {
+                // `as_signed` and `as_unsigned` are unsafe because they're trait methods with
+                // target features to allow use on feature-dependent types (such as SVE vectors).
+                // We can safely wrap them here.
+                "as_signed" => true,
+                "as_unsigned" => true,
+                _ => {
+                    exp.requires_unsafe_wrapper(ctx_fn)
+                        || args.iter().any(|exp| exp.requires_unsafe_wrapper(ctx_fn))
+                }
+            },
+            // We only use macros to check const generics (using static assertions).
+            Self::MacroCall(_name, _args) => false,
+            // Materialising uninitialised values is always unsafe, and we avoid it in safe
+            // functions.
+            Self::SvUndef => panic!("Refusing to wrap unsafe SvUndef in safe function '{ctx_fn}'."),
+            // Variants that aren't tokenised. We shouldn't encounter these here.
+            Self::MatchKind(..) => {
+                unimplemented!("The unsafety of {self:?} cannot be determined in '{ctx_fn}'.")
+            }
+            Self::MatchSize(..) => {
+                unimplemented!("The unsafety of {self:?} cannot be determined in '{ctx_fn}'.")
+            }
+        }
+    }
+}
+
+impl FromStr for Expression {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        lazy_static! {
+            static ref MACRO_RE: Regex =
+                Regex::new(r"^(?P<name>[\w\d_]+)!\((?P<ex>.*?)\);?$").unwrap();
+        }
+
+        if s == "SvUndef" {
+            Ok(Expression::SvUndef)
+        } else if MACRO_RE.is_match(s) {
+            let c = MACRO_RE.captures(s).unwrap();
+            let ex = c["ex"].to_string();
+            let _: TokenStream = ex
+                .parse()
+                .map_err(|e| format!("could not parse macro call expression: {e:#?}"))?;
+            Ok(Expression::MacroCall(c["name"].to_string(), ex))
+        } else {
+            let (s, id_type) = if let Some(varname) = s.strip_prefix('$') {
+                (varname, IdentifierType::Variable)
+            } else {
+                (s, IdentifierType::Symbol)
+            };
+            let identifier = s.trim().parse()?;
+            Ok(Expression::Identifier(identifier, id_type))
+        }
+    }
+}
+
+impl From<FnCall> for Expression {
+    fn from(fn_call: FnCall) -> Self {
+        Expression::FnCall(fn_call)
+    }
+}
+
+impl From<WildString> for Expression {
+    fn from(ws: WildString) -> Self {
+        Expression::Identifier(ws, IdentifierType::Symbol)
+    }
+}
+
+impl From<&Argument> for Expression {
+    fn from(a: &Argument) -> Self {
+        Expression::Identifier(a.name.to_owned(), IdentifierType::Variable)
+    }
+}
+
+impl TryFrom<&StaticDefinition> for Expression {
+    type Error = String;
+
+    fn try_from(sd: &StaticDefinition) -> Result<Self, Self::Error> {
+        match sd {
+            StaticDefinition::Constant(imm) => Ok(imm.into()),
+            StaticDefinition::Generic(t) => t.parse(),
+        }
+    }
+}
+
+impl fmt::Display for Expression {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Identifier(identifier, kind) => {
+                write!(
+                    f,
+                    "{}{identifier}",
+                    matches!(kind, IdentifierType::Variable)
+                        .then_some("$")
+                        .unwrap_or_default()
+                )
+            }
+            Self::MacroCall(name, expression) => {
+                write!(f, "{name}!({expression})")
+            }
+            _ => Err(fmt::Error),
+        }
+    }
+}
+
+impl ToTokens for Expression {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        match self {
+            Self::Let(LetVariant::Basic(var_name, exp)) => {
+                let var_ident = format_ident!("{}", var_name.to_string());
+                tokens.append_all(quote! { let #var_ident = #exp })
+            }
+            Self::Let(LetVariant::WithType(var_name, ty, exp)) => {
+                let var_ident = format_ident!("{}", var_name.to_string());
+                tokens.append_all(quote! { let #var_ident: #ty = #exp })
+            }
+            Self::Assign(var_name, exp) => {
+                let var_ident = format_ident!("{}", var_name);
+                tokens.append_all(quote! { #var_ident = #exp })
+            }
+            Self::MacroCall(name, ex) => {
+                let name = format_ident!("{name}");
+                let ex: TokenStream = ex.parse().unwrap();
+                tokens.append_all(quote! { #name!(#ex) })
+            }
+            Self::FnCall(fn_call) => fn_call.to_tokens(tokens),
+            Self::MethodCall(exp, fn_name, args) => {
+                let fn_ident = format_ident!("{}", fn_name);
+                tokens.append_all(quote! { #exp.#fn_ident(#(#args),*) })
+            }
+            Self::Identifier(identifier, _) => {
+                assert!(
+                    !identifier.has_wildcards(),
+                    "expression {self:#?} was not built before calling to_tokens"
+                );
+                identifier
+                    .to_string()
+                    .parse::<TokenStream>()
+                    .expect("invalid syntax")
+                    .to_tokens(tokens);
+            }
+            Self::IntConstant(n) => tokens.append(Literal::i32_unsuffixed(*n)),
+            Self::FloatConstant(n) => tokens.append(Literal::f32_unsuffixed(*n)),
+            Self::BoolConstant(true) => tokens.append(format_ident!("true")),
+            Self::BoolConstant(false) => tokens.append(format_ident!("false")),
+            Self::Array(vec) => tokens.append_all(quote! { [ #(#vec),* ] }),
+            Self::LLVMLink(link) => link.to_tokens(tokens),
+            Self::CastAs(ex, ty) => {
+                let ty: TokenStream = ty.parse().expect("invalid syntax");
+                tokens.append_all(quote! { #ex as #ty })
+            }
+            Self::SvUndef => tokens.append_all(quote! { simd_reinterpret(()) }),
+            Self::Multiply(lhs, rhs) => tokens.append_all(quote! { #lhs * #rhs }),
+            Self::Type(ty) => ty.to_tokens(tokens),
+            _ => unreachable!("{self:?} cannot be converted to tokens."),
+        }
+    }
+}
+
+impl Serialize for Expression {
+    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        S: serde::Serializer,
+    {
+        match self {
+            Self::IntConstant(v) => serializer.serialize_i32(*v),
+            Self::FloatConstant(v) => serializer.serialize_f32(*v),
+            Self::BoolConstant(v) => serializer.serialize_bool(*v),
+            Self::Identifier(..) => serializer.serialize_str(&self.to_string()),
+            Self::MacroCall(..) => serializer.serialize_str(&self.to_string()),
+            _ => Expression::serialize(self, serializer),
+        }
+    }
+}
+
+impl<'de> Deserialize<'de> for Expression {
+    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+    where
+        D: Deserializer<'de>,
+    {
+        struct CustomExpressionVisitor;
+
+        impl<'de> Visitor<'de> for CustomExpressionVisitor {
+            type Value = Expression;
+
+            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+                formatter.write_str("integer, float, boolean, string or map")
+            }
+
+            fn visit_bool<E>(self, v: bool) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Expression::BoolConstant(v))
+            }
+
+            fn visit_i64<E>(self, v: i64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Expression::IntConstant(v as i32))
+            }
+
+            fn visit_u64<E>(self, v: u64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Expression::IntConstant(v as i32))
+            }
+
+            fn visit_f64<E>(self, v: f64) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                Ok(Expression::FloatConstant(v as f32))
+            }
+
+            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
+            where
+                E: de::Error,
+            {
+                FromStr::from_str(value).map_err(de::Error::custom)
+            }
+
+            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+            where
+                A: de::SeqAccess<'de>,
+            {
+                let arr = std::iter::from_fn(|| seq.next_element::<Self::Value>().transpose())
+                    .try_collect()?;
+                Ok(Expression::Array(arr))
+            }
+
+            fn visit_map<M>(self, map: M) -> Result<Expression, M::Error>
+            where
+                M: MapAccess<'de>,
+            {
+                // `MapAccessDeserializer` is a wrapper that turns a `MapAccess`
+                // into a `Deserializer`, allowing it to be used as the input to
+                // `Expression`'s derived (remote) `Deserialize` implementation,
+                // which then deserializes itself using the entries from the map
+                // visitor.
+                Expression::deserialize(de::value::MapAccessDeserializer::new(map))
+            }
+        }
+
+        deserializer.deserialize_any(CustomExpressionVisitor)
+    }
+}
diff --git a/crates/stdarch-gen2/src/input.rs b/crates/stdarch-gen2/src/input.rs
new file mode 100644
index 0000000000..bb2414adec
--- /dev/null
+++ b/crates/stdarch-gen2/src/input.rs
@@ -0,0 +1,432 @@
+use itertools::Itertools;
+use serde::{de, Deserialize, Deserializer, Serialize};
+
+use crate::{
+    context::{self, GlobalContext},
+    intrinsic::Intrinsic,
+    predicate_forms::{PredicateForm, PredicationMask, PredicationMethods},
+    typekinds::TypeKind,
+    wildstring::WildString,
+};
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum InputType {
+    /// PredicateForm variant argument
+    #[serde(skip)] // Predicate forms have their own dedicated deserialization field. Skip.
+    PredicateForm(PredicateForm),
+    /// Operand from which to generate an N variant
+    #[serde(skip)]
+    NVariantOp(Option<WildString>),
+    /// TypeKind variant argument
+    Type(TypeKind),
+}
+
+impl InputType {
+    /// Optionally unwraps as a PredicateForm.
+    pub fn predicate_form(&self) -> Option<&PredicateForm> {
+        match self {
+            InputType::PredicateForm(pf) => Some(pf),
+            _ => None,
+        }
+    }
+
+    /// Optionally unwraps as a mutable PredicateForm
+    pub fn predicate_form_mut(&mut self) -> Option<&mut PredicateForm> {
+        match self {
+            InputType::PredicateForm(pf) => Some(pf),
+            _ => None,
+        }
+    }
+
+    /// Optionally unwraps as a TypeKind.
+    pub fn typekind(&self) -> Option<&TypeKind> {
+        match self {
+            InputType::Type(ty) => Some(ty),
+            _ => None,
+        }
+    }
+
+    /// Optionally unwraps as a NVariantOp
+    pub fn n_variant_op(&self) -> Option<&WildString> {
+        match self {
+            InputType::NVariantOp(Some(op)) => Some(op),
+            _ => None,
+        }
+    }
+}
+
+impl PartialOrd for InputType {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for InputType {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        use std::cmp::Ordering::*;
+
+        match (self, other) {
+            (InputType::PredicateForm(pf1), InputType::PredicateForm(pf2)) => pf1.cmp(pf2),
+            (InputType::Type(ty1), InputType::Type(ty2)) => ty1.cmp(ty2),
+
+            (InputType::NVariantOp(None), InputType::NVariantOp(Some(..))) => Less,
+            (InputType::NVariantOp(Some(..)), InputType::NVariantOp(None)) => Greater,
+            (InputType::NVariantOp(_), InputType::NVariantOp(_)) => Equal,
+
+            (InputType::Type(..), InputType::PredicateForm(..)) => Less,
+            (InputType::PredicateForm(..), InputType::Type(..)) => Greater,
+
+            (InputType::Type(..), InputType::NVariantOp(..)) => Less,
+            (InputType::NVariantOp(..), InputType::Type(..)) => Greater,
+
+            (InputType::PredicateForm(..), InputType::NVariantOp(..)) => Less,
+            (InputType::NVariantOp(..), InputType::PredicateForm(..)) => Greater,
+        }
+    }
+}
+
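+/// (De)serializes a `Vec<T>` that may be written either as a single value or as a list.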
+mod many_or_one {
+    use serde::{de::Deserializer, ser::Serializer, Deserialize, Serialize};
+
+    pub fn serialize<T, S>(vec: &Vec<T>, serializer: S) -> Result<S::Ok, S::Error>
+    where
+        T: Serialize,
+        S: Serializer,
+    {
+        if vec.len() == 1 {
+            vec.first().unwrap().serialize(serializer)
+        } else {
+            vec.serialize(serializer)
+        }
+    }
+
+    pub fn deserialize<'de, T, D>(deserializer: D) -> Result<Vec<T>, D::Error>
+    where
+        T: Deserialize<'de>,
+        D: Deserializer<'de>,
+    {
+        #[derive(Debug, Clone, Serialize, Deserialize)]
+        #[serde(untagged)]
+        enum ManyOrOne<T> {
+            Many(Vec<T>),
+            One(T),
+        }
+
+        match ManyOrOne::deserialize(deserializer)? {
+            ManyOrOne::Many(vec) => Ok(vec),
+            ManyOrOne::One(val) => Ok(vec![val]),
+        }
+    }
+}
+
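+/// An ordered set of inputs (types, predicate form, N-variant operand) describing a
+/// single generated variant of an intrinsic.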
+#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
+pub struct InputSet(#[serde(with = "many_or_one")] Vec<InputType>);
+
+impl InputSet {
+    pub fn get(&self, idx: usize) -> Option<&InputType> {
+        self.0.get(idx)
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &InputType> + '_ {
+        self.0.iter()
+    }
+
+    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut InputType> + '_ {
+        self.0.iter_mut()
+    }
+
+    pub fn into_iter(self) -> impl Iterator<Item = InputType> + Clone {
+        self.0.into_iter()
+    }
+
+    pub fn types_len(&self) -> usize {
+        self.iter().filter_map(|arg| arg.typekind()).count()
+    }
+
+    pub fn typekind(&self, idx: Option<usize>) -> Option<TypeKind> {
+        let types_len = self.types_len();
+        self.get(idx.unwrap_or(0)).and_then(move |arg: &InputType| {
+            if (idx.is_none() && types_len != 1) || (idx.is_some() && types_len == 1) {
+                None
+            } else {
+                arg.typekind().cloned()
+            }
+        })
+    }
+}
+
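+/// One entry of the `types` list: a group of input sets whose Cartesian product is taken.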
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+pub struct InputSetEntry(#[serde(with = "many_or_one")] Vec<InputSet>);
+
+impl InputSetEntry {
+    pub fn new(input: Vec<InputSet>) -> Self {
+        Self(input)
+    }
+
+    pub fn get(&self, idx: usize) -> Option<&InputSet> {
+        self.0.get(idx)
+    }
+}
+
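+/// Checks that every `InputSetEntry` in the `types` list contains the same number of
+/// input sets.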
+fn validate_types<'de, D>(deserializer: D) -> Result<Vec<InputSetEntry>, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let v: Vec<InputSetEntry> = Vec::deserialize(deserializer)?;
+
+    let mut it = v.iter();
+    if let Some(first) = it.next() {
+        it.try_fold(first, |last, cur| {
+            if last.0.len() == cur.0.len() {
+                Ok(cur)
+            } else {
+                Err("the length of the InputSets and the product lists must match".to_string())
+            }
+        })
+        .map_err(de::Error::custom)?;
+    }
+
+    Ok(v)
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct IntrinsicInput {
+    #[serde(default)]
+    #[serde(deserialize_with = "validate_types")]
+    pub types: Vec<InputSetEntry>,
+
+    #[serde(flatten)]
+    pub predication_methods: PredicationMethods,
+
+    /// Generates a `_n` variant where the specified operand is a primitive type
+    /// that requires conversion to an SVE one. The `{_n}` wildcard is required
+    /// in the intrinsic's name, otherwise an error is reported.
+    #[serde(default)]
+    pub n_variant_op: WildString,
+}
+
+impl IntrinsicInput {
+    /// Extracts all the possible variants as an iterator.
+    pub fn variants(
+        &self,
+        intrinsic: &Intrinsic,
+    ) -> context::Result<impl Iterator<Item = InputSet> + '_> {
+        let mut top_product = vec![];
+
+        if !self.types.is_empty() {
+            top_product.push(
+                self.types
+                    .iter()
+                    .flat_map(|ty_in| {
+                        ty_in
+                            .0
+                            .iter()
+                            .map(|v| v.clone().into_iter())
+                            .multi_cartesian_product()
+                    })
+                    .collect_vec(),
+            )
+        }
+
+        if let Ok(mask) = PredicationMask::try_from(&intrinsic.signature.name) {
+            top_product.push(
+                PredicateForm::compile_list(&mask, &self.predication_methods)?
+                    .into_iter()
+                    .map(|pf| vec![InputType::PredicateForm(pf)])
+                    .collect_vec(),
+            )
+        }
+
+        if !self.n_variant_op.is_empty() {
+            top_product.push(vec![
+                vec![InputType::NVariantOp(None)],
+                vec![InputType::NVariantOp(Some(self.n_variant_op.to_owned()))],
+            ])
+        }
+
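+        // Each populated group above (type sets, predicate forms, `_n` variants)
+        // contributes one axis; their cartesian product enumerates every variant.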
+        let it = top_product
+            .into_iter()
+            .map(|v| v.into_iter())
+            .multi_cartesian_product()
+            .map(|set| InputSet(set.into_iter().flatten().collect_vec()));
+        Ok(it)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct GeneratorInput {
+    #[serde(flatten)]
+    pub ctx: GlobalContext,
+    pub intrinsics: Vec<Intrinsic>,
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::{
+        input::*,
+        predicate_forms::{DontCareMethod, ZeroingMethod},
+    };
+
+    #[test]
+    fn test_empty() {
+        let str = r#"types: []"#;
+        let input: IntrinsicInput = serde_yaml::from_str(str).expect("failed to parse");
+        let mut variants = input.variants(&Intrinsic::default()).unwrap().into_iter();
+        assert_eq!(variants.next(), None);
+    }
+
+    #[test]
+    fn test_product() {
+        let str = r#"types:
+- [f64, f32]
+- [i64, [f64, f32]]
+"#;
+        let input: IntrinsicInput = serde_yaml::from_str(str).expect("failed to parse");
+        let mut intrinsic = Intrinsic::default();
+        intrinsic.signature.name = "test_intrinsic{_mx}".parse().unwrap();
+        let mut variants = input.variants(&intrinsic).unwrap().into_iter();
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("f64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::Merging),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("f64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::DontCare(DontCareMethod::AsMerging)),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::Type("f64".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::Merging),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::Type("f64".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::DontCare(DontCareMethod::AsMerging)),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::Merging),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::DontCare(DontCareMethod::AsMerging)),
+            ])),
+        );
+        assert_eq!(variants.next(), None);
+    }
+
+    #[test]
+    fn test_n_variant() {
+        let str = r#"types:
+- [f64, f32]
+n_variant_op: op2
+"#;
+        let input: IntrinsicInput = serde_yaml::from_str(str).expect("failed to parse");
+        let mut variants = input.variants(&Intrinsic::default()).unwrap().into_iter();
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("f64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::NVariantOp(None),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("f64".parse().unwrap()),
+                InputType::Type("f32".parse().unwrap()),
+                InputType::NVariantOp(Some("op2".parse().unwrap())),
+            ]))
+        );
+        assert_eq!(variants.next(), None)
+    }
+
+    #[test]
+    fn test_invalid_length() {
+        let str = r#"types: [i32, [[u64], [u32]]]"#;
+        serde_yaml::from_str::<IntrinsicInput>(str).expect_err("failure expected");
+    }
+
+    #[test]
+    fn test_invalid_predication() {
+        let str = "types: []";
+        let input: IntrinsicInput = serde_yaml::from_str(str).expect("failed to parse");
+        let mut intrinsic = Intrinsic::default();
+        intrinsic.signature.name = "test_intrinsic{_mxz}".parse().unwrap();
+        input
+            .variants(&intrinsic)
+            .map(|v| v.collect_vec())
+            .expect_err("failure expected");
+    }
+
+    #[test]
+    fn test_invalid_predication_mask() {
+        "test_intrinsic{_mxy}"
+            .parse::<WildString>()
+            .expect_err("failure expected");
+        "test_intrinsic{_}"
+            .parse::<WildString>()
+            .expect_err("failure expected");
+    }
+
+    #[test]
+    fn test_zeroing_predication() {
+        let str = r#"types: [i64]
+zeroing_method: { drop: inactive }"#;
+        let input: IntrinsicInput = serde_yaml::from_str(str).expect("failed to parse");
+        let mut intrinsic = Intrinsic::default();
+        intrinsic.signature.name = "test_intrinsic{_mxz}".parse().unwrap();
+        let mut variants = input.variants(&intrinsic).unwrap();
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::Merging),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::DontCare(DontCareMethod::AsZeroing)),
+            ]))
+        );
+        assert_eq!(
+            variants.next(),
+            Some(InputSet(vec![
+                InputType::Type("i64".parse().unwrap()),
+                InputType::PredicateForm(PredicateForm::Zeroing(ZeroingMethod::Drop {
+                    drop: "inactive".parse().unwrap()
+                })),
+            ]))
+        );
+        assert_eq!(variants.next(), None)
+    }
+}
diff --git a/crates/stdarch-gen2/src/intrinsic.rs b/crates/stdarch-gen2/src/intrinsic.rs
new file mode 100644
index 0000000000..d05b71e44d
--- /dev/null
+++ b/crates/stdarch-gen2/src/intrinsic.rs
@@ -0,0 +1,1498 @@
+use itertools::Itertools;
+use proc_macro2::{Punct, Spacing, TokenStream};
+use quote::{format_ident, quote, ToTokens, TokenStreamExt};
+use serde::{Deserialize, Serialize};
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use std::collections::{HashMap, HashSet};
+use std::fmt;
+use std::ops::RangeInclusive;
+use std::str::FromStr;
+
+use crate::assert_instr::InstructionAssertionsForBaseType;
+use crate::context::{GlobalContext, GroupContext};
+use crate::input::{InputSet, InputSetEntry};
+use crate::predicate_forms::{DontCareMethod, PredicateForm, PredicationMask, ZeroingMethod};
+use crate::{
+    assert_instr::InstructionAssertionMethod,
+    context::{self, ArchitectureSettings, Context, LocalContext, VariableType},
+    expression::{Expression, FnCall, IdentifierType},
+    input::IntrinsicInput,
+    matching::{KindMatchable, SizeMatchable},
+    typekinds::*,
+    wildcards::Wildcard,
+    wildstring::WildString,
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum SubstitutionType {
+    MatchSize(SizeMatchable<WildString>),
+    MatchKind(KindMatchable<WildString>),
+}
+
+impl SubstitutionType {
+    pub fn get(&mut self, ctx: &LocalContext) -> context::Result<WildString> {
+        match self {
+            Self::MatchSize(smws) => {
+                smws.perform_match(ctx)?;
+                Ok(smws.as_ref().clone())
+            }
+            Self::MatchKind(kmws) => {
+                kmws.perform_match(ctx)?;
+                Ok(kmws.as_ref().clone())
+            }
+        }
+    }
+}
+
+/// Mutability level
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
+pub enum AccessLevel {
+    /// Immutable
+    R,
+    /// Mutable
+    RW,
+}
+
+/// Function signature argument.
+///
+/// Prepend the `mut` keyword for a mutable argument. Separate argument name
+/// and type with a colon `:`. Usage examples:
+/// - Mutable argument: `mut arg1: *u64`
+/// - Immutable argument: `arg2: u32`
+#[derive(Debug, Clone, SerializeDisplay, DeserializeFromStr)]
+pub struct Argument {
+    /// Argument name
+    pub name: WildString,
+    /// Mutability level
+    pub rw: AccessLevel,
+    /// Argument type
+    pub kind: TypeKind,
+}
+
+impl Argument {
+    pub fn populate_variables(&self, vars: &mut HashMap<String, (TypeKind, VariableType)>) {
+        vars.insert(
+            self.name.to_string(),
+            (self.kind.clone(), VariableType::Argument),
+        );
+    }
+}
+
+impl FromStr for Argument {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut it = s.splitn(2, ':').map(<str>::trim);
+        if let Some(mut lhs) = it.next().map(|s| s.split_whitespace()) {
+            let lhs_len = lhs.clone().count();
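+            // The left-hand side is either `name` or `mut name`; the part after
+            // the colon, if present, is the argument's type.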
+            match (lhs_len, lhs.next(), it.next()) {
+                (2, Some("mut"), Some(kind)) => Ok(Argument {
+                    name: lhs.next().unwrap().parse()?,
+                    rw: AccessLevel::RW,
+                    kind: kind.parse()?,
+                }),
+                (2, Some(ident), _) => Err(format!("invalid {ident:#?} keyword")),
+                (1, Some(name), Some(kind)) => Ok(Argument {
+                    name: name.parse()?,
+                    rw: AccessLevel::R,
+                    kind: kind.parse()?,
+                }),
+                _ => Err(format!("invalid argument `{s}` provided")),
+            }
+        } else {
+            Err(format!("invalid argument `{s}` provided"))
+        }
+    }
+}
+
+impl fmt::Display for Argument {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let AccessLevel::RW = &self.rw {
+            write!(f, "mut ")?;
+        }
+
+        write!(f, "{}: {}", self.name, self.kind)
+    }
+}
+
+impl ToTokens for Argument {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        if let AccessLevel::RW = &self.rw {
+            tokens.append(format_ident!("mut"))
+        }
+
+        let (name, kind) = (format_ident!("{}", self.name.to_string()), &self.kind);
+        tokens.append_all(quote! { #name: #kind })
+    }
+}
+
+/// Static definition part of the signature. It may evaluate to a constant
+/// expression with e.g. `const imm: u64`, or a generic `T: Into<u64>`.
+#[derive(Debug, Clone, SerializeDisplay, DeserializeFromStr)]
+pub enum StaticDefinition {
+    /// Constant expression
+    Constant(Argument),
+    /// Generic type
+    Generic(String),
+}
+
+impl StaticDefinition {
+    pub fn as_variable(&self) -> Option<(String, (TypeKind, VariableType))> {
+        match self {
+            StaticDefinition::Constant(arg) => Some((
+                arg.name.to_string(),
+                (arg.kind.clone(), VariableType::Argument),
+            )),
+            StaticDefinition::Generic(..) => None,
+        }
+    }
+}
+
+impl FromStr for StaticDefinition {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s.trim() {
+            s if s.starts_with("const ") => Ok(StaticDefinition::Constant(s[6..].trim().parse()?)),
+            s => Ok(StaticDefinition::Generic(s.to_string())),
+        }
+    }
+}
+
+impl fmt::Display for StaticDefinition {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            StaticDefinition::Constant(arg) => write!(f, "const {arg}"),
+            StaticDefinition::Generic(generic) => write!(f, "{generic}"),
+        }
+    }
+}
+
+impl ToTokens for StaticDefinition {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        tokens.append_all(match self {
+            StaticDefinition::Constant(arg) => quote! { const #arg },
+            StaticDefinition::Generic(generic) => {
+                let generic: TokenStream = generic.parse().expect("invalid Rust code");
+                quote! { #generic }
+            }
+        })
+    }
+}
+
+/// Function constraints
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum Constraint {
+    /// Asserts that the given variable equals any of the given integer values
+    AnyI32 {
+        variable: String,
+        any_values: Vec<i32>,
+    },
+    /// WildString version of RangeI32. If the string values given for the range
+    /// parse as valid integers, this is lowered into a RangeI32 during the build step.
+    RangeWildstring {
+        variable: String,
+        range: (WildString, WildString),
+    },
+    /// Asserts that the given variable's value falls in the specified range
+    RangeI32 {
+        variable: String,
+        range: SizeMatchable<RangeInclusive<i32>>,
+    },
+    /// Asserts that the number of elements/lanes does not exceed the 2048-bit SVE constraint
+    SVEMaxElems {
+        variable: String,
+        sve_max_elems_type: TypeKind,
+    },
+    /// Asserts that the number of elements/lanes does not exceed the 128-bit register constraint
+    VecMaxElems {
+        variable: String,
+        vec_max_elems_type: TypeKind,
+    },
+}
+
+impl Constraint {
+    fn variable(&self) -> &str {
+        match self {
+            Constraint::AnyI32 { variable, .. }
+            | Constraint::RangeWildstring { variable, .. }
+            | Constraint::RangeI32 { variable, .. }
+            | Constraint::SVEMaxElems { variable, .. }
+            | Constraint::VecMaxElems { variable, .. } => variable,
+        }
+    }
+    pub fn build(&mut self, ctx: &Context) -> context::Result {
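+        // Lower a RangeWildstring constraint into a concrete RangeI32 once its
+        // bounds can be resolved (and parsed as integers) in the local context.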
+        if let Self::RangeWildstring {
+            variable,
+            range: (min, max),
+        } = self
+        {
+            min.build_acle(ctx.local)?;
+            max.build_acle(ctx.local)?;
+            let min = min.to_string();
+            let max = max.to_string();
+            let min: i32 = min
+                .parse()
+                .map_err(|_| format!("the minimum value `{min}` is not a valid number"))?;
+            let max: i32 = max
+                .parse()
+                .map_err(|_| format!("the maximum value `{max}` is not a valid number"))?;
+            *self = Self::RangeI32 {
+                variable: variable.to_owned(),
+                range: SizeMatchable::Matched(RangeInclusive::new(min, max)),
+            }
+        }
+
+        if let Self::SVEMaxElems {
+            sve_max_elems_type: ty,
+            ..
+        }
+        | Self::VecMaxElems {
+            vec_max_elems_type: ty,
+            ..
+        } = self
+        {
+            if let Some(w) = ty.wildcard() {
+                ty.populate_wildcard(ctx.local.provide_type_wildcard(w)?)?;
+            }
+        }
+
+        if let Self::RangeI32 { range, .. } = self {
+            range.perform_match(ctx.local)?;
+        }
+
+        let variable = self.variable();
+        ctx.local
+            .variables
+            .contains_key(variable)
+            .then_some(())
+            .ok_or_else(|| format!("cannot build constraint, could not find variable {variable}"))
+    }
+}
+
+/// Function signature
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct Signature {
+    /// Function name
+    pub name: WildString,
+    /// List of function arguments, leave unset or empty for no arguments
+    pub arguments: Vec<Argument>,
+    /// Function return type, leave unset for void
+    pub return_type: Option<TypeKind>,
+
+    /// List of static definitions, leave unset or empty if not required
+    #[serde(default)]
+    pub static_defs: Vec<StaticDefinition>,
+
+    /// **Internal use only.**
+    /// Whether the ultimate function is specific to predicates.
+    #[serde(skip)]
+    pub is_predicate_specific: bool,
+
+    /// **Internal use only.**
+    /// Setting this property will trigger the signature builder to convert any `svbool*_t` to `svbool_t` in the input and output.
+    #[serde(skip)]
+    pub predicate_needs_conversion: bool,
+}
+
+impl Signature {
+    pub fn drop_argument(&mut self, arg_name: &WildString) -> Result<(), String> {
+        if let Some(idx) = self
+            .arguments
+            .iter()
+            .position(|arg| arg.name.to_string() == arg_name.to_string())
+        {
+            self.arguments.remove(idx);
+            Ok(())
+        } else {
+            Err(format!("no argument {arg_name} found to drop"))
+        }
+    }
+
+    pub fn build(&mut self, ctx: &LocalContext) -> context::Result {
+        self.name.build_acle(ctx)?;
+
+        if let Some(ref mut return_type) = self.return_type {
+            if let Some(w) = return_type.clone().wildcard() {
+                return_type.populate_wildcard(ctx.provide_type_wildcard(w)?)?;
+            }
+        }
+
+        self.arguments
+            .iter_mut()
+            .try_for_each(|arg| arg.name.build_acle(ctx))?;
+
+        self.arguments
+            .iter_mut()
+            .filter_map(|arg| {
+                arg.kind
+                    .clone()
+                    .wildcard()
+                    .map(|w| (&mut arg.kind, w.clone()))
+            })
+            .try_for_each(|(ty, w)| ty.populate_wildcard(ctx.provide_type_wildcard(&w)?))
+    }
+
+    pub fn fn_name(&self) -> WildString {
+        self.name.replace(['[', ']'], "")
+    }
+
+    pub fn doc_name(&self) -> String {
+        self.name.to_string()
+    }
+}
+
+impl ToTokens for Signature {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let name_ident = format_ident!("{}", self.fn_name().to_string());
+        let arguments = self
+            .arguments
+            .clone()
+            .into_iter()
+            .map(|mut arg| {
+                if arg
+                    .kind
+                    .vector()
+                    .map_or(false, |ty| ty.base_type().is_bool())
+                    && self.predicate_needs_conversion
+                {
+                    arg.kind = TypeKind::Vector(VectorType::make_predicate_from_bitsize(8))
+                }
+                arg
+            })
+            .collect_vec();
+        let static_defs = &self.static_defs;
+        tokens.append_all(quote! { fn #name_ident<#(#static_defs),*>(#(#arguments),*) });
+
+        if let Some(ref return_type) = self.return_type {
+            if return_type
+                .vector()
+                .map_or(false, |ty| ty.base_type().is_bool())
+                && self.predicate_needs_conversion
+            {
+                tokens.append_all(quote! { -> svbool_t })
+            } else {
+                tokens.append_all(quote! { -> #return_type })
+            }
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct LLVMLinkAttribute {
+    pub arch: String,
+    pub link: String,
+}
+
+impl ToTokens for LLVMLinkAttribute {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let LLVMLinkAttribute { arch, link } = self;
+        tokens.append_all(quote! {
+            #[cfg_attr(target_arch = #arch, link_name = #link)]
+        })
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+pub struct LLVMLink {
+    /// LLVM link function name without namespace and types,
+    /// e.g. `st1` in `llvm.aarch64.sve.st1.nxv4i32`
+    pub name: WildString,
+
+    /// LLVM link signature arguments; leave unset to inherit them from the intrinsic's signature
+    pub arguments: Option<Vec<Argument>>,
+    /// LLVM link signature return type; leave unset to inherit it from the intrinsic's signature
+    pub return_type: Option<TypeKind>,
+
+    /// **Internal use only. Do not set.**
+    /// Attribute LLVM links for the function. Each entry records the architecture it
+    /// targets and the LLVM link itself.
+    #[serde(skip)]
+    pub links: Option<Vec<LLVMLinkAttribute>>,
+
+    /// **Internal use only. Do not set.**
+    /// Generated signature from these `arguments` and/or `return_type` if set, and the intrinsic's signature.
+    #[serde(skip)]
+    pub signature: Option<Box<Signature>>,
+}
+
+impl LLVMLink {
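+    /// Returns the fully-qualified LLVM link name, prepending the architecture's
+    /// link prefix (e.g. `llvm.aarch64.sve`) unless the name already starts with
+    /// `llvm`.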
+    pub fn resolve(&self, cfg: &ArchitectureSettings) -> String {
+        self.name
+            .starts_with("llvm")
+            .then(|| self.name.to_string())
+            .unwrap_or_else(|| format!("{}.{}", cfg.llvm_link_prefix, self.name))
+    }
+
+    pub fn build_and_save(&mut self, ctx: &mut Context) -> context::Result {
+        self.build(ctx)?;
+
+        // Save LLVM link to the group context
+        ctx.global.arch_cfgs.iter().for_each(|cfg| {
+            ctx.group
+                .links
+                .insert(self.resolve(cfg), ctx.local.input.clone());
+        });
+
+        Ok(())
+    }
+
+    pub fn build(&mut self, ctx: &mut Context) -> context::Result {
+        let mut sig_name = ctx.local.signature.name.clone();
+        sig_name.prepend_str("_");
+
+        let mut sig = Signature {
+            name: sig_name,
+            arguments: self
+                .arguments
+                .clone()
+                .unwrap_or_else(|| ctx.local.signature.arguments.clone()),
+            return_type: self
+                .return_type
+                .clone()
+                .or_else(|| ctx.local.signature.return_type.clone()),
+            static_defs: vec![],
+            is_predicate_specific: ctx.local.signature.is_predicate_specific,
+            predicate_needs_conversion: false,
+        };
+
+        sig.build(ctx.local)?;
+        self.name.build(ctx.local, TypeRepr::LLVMMachine)?;
+
+        // Add link function name to context
+        ctx.local
+            .substitutions
+            .insert(Wildcard::LLVMLink, sig.fn_name().to_string());
+
+        self.signature = Some(Box::new(sig));
+        self.links = Some(
+            ctx.global
+                .arch_cfgs
+                .iter()
+                .map(|cfg| LLVMLinkAttribute {
+                    arch: cfg.arch_name.to_owned(),
+                    link: self.resolve(cfg),
+                })
+                .collect_vec(),
+        );
+
+        Ok(())
+    }
+
+    /// Rewrites all the unsigned types in the signature as signed, as unsigned types are unsupported by LLVM.
+    pub fn sanitise_uints(&mut self) {
+        let transform = |tk: &mut TypeKind| {
+            if let Some(BaseType::Sized(BaseTypeKind::UInt, size)) = tk.base_type() {
+                *tk.base_type_mut().unwrap() = BaseType::Sized(BaseTypeKind::Int, *size)
+            }
+        };
+
+        if let Some(sig) = self.signature.as_mut() {
+            for arg in sig.arguments.iter_mut() {
+                transform(&mut arg.kind);
+            }
+
+            sig.return_type.as_mut().map(transform);
+        }
+    }
+
+    /// Make a function call to the LLVM link
+    pub fn make_fn_call(&self, intrinsic_sig: &Signature) -> context::Result<Expression> {
+        let link_sig = self.signature.as_ref().ok_or_else(|| {
+            "cannot derive the LLVM link call, as it does not hold a valid function signature"
+                .to_string()
+        })?;
+
+        if intrinsic_sig.arguments.len() != link_sig.arguments.len() {
+            return Err(
+                "cannot derive the LLVM link call, the number of arguments does not match"
+                    .to_string(),
+            );
+        }
+
+        let call_args = intrinsic_sig
+            .arguments
+            .iter()
+            .zip(link_sig.arguments.iter())
+            .map(|(intrinsic_arg, link_arg)| {
+                // Could also add a type check...
+                if intrinsic_arg.name == link_arg.name {
+                    Ok(Expression::Identifier(
+                        intrinsic_arg.name.to_owned(),
+                        IdentifierType::Variable,
+                    ))
+                } else {
+                    Err("cannot derive the LLVM link call, the arguments do not match".to_string())
+                }
+            })
+            .try_collect()?;
+
+        Ok(FnCall::new_expression(link_sig.fn_name().into(), call_args))
+    }
+
+    /// Given a FnCall, apply all the predicate and unsigned conversions as required.
+    pub fn apply_conversions_to_call(
+        &self,
+        mut fn_call: FnCall,
+        ctx: &LocalContext,
+    ) -> context::Result<Expression> {
+        use BaseType::{Sized, Unsized};
+        use BaseTypeKind::{Bool, UInt};
+        use VariableType::Argument;
+
+        let convert =
+            |method: &str, ex| Expression::MethodCall(Box::new(ex), method.to_string(), vec![]);
+
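+        // Arguments referencing predicate variables with a non-8-bit element size are
+        // converted with `.into()`, and unsigned arguments with `.as_signed()`, so the
+        // call matches the LLVM link; the return value is converted back below.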
+        fn_call.1 = fn_call
+            .1
+            .into_iter()
+            .map(|arg| -> context::Result<Expression> {
+                if let Expression::Identifier(ref var_name, IdentifierType::Variable) = arg {
+                    let (kind, scope) = ctx
+                        .variables
+                        .get(&var_name.to_string())
+                        .ok_or_else(|| format!("invalid variable {var_name:?} being referenced"))?;
+
+                    match (scope, kind.base_type()) {
+                        (Argument, Some(Sized(Bool, bitsize))) if *bitsize != 8 => {
+                            Ok(convert("into", arg))
+                        }
+                        (Argument, Some(Sized(UInt, _) | Unsized(UInt))) => {
+                            Ok(convert("as_signed", arg))
+                        }
+                        _ => Ok(arg),
+                    }
+                } else {
+                    Ok(arg)
+                }
+            })
+            .try_collect()?;
+
+        let return_type_requires_conversion = self
+            .signature
+            .as_ref()
+            .and_then(|sig| sig.return_type.as_ref())
+            .and_then(|ty| {
+                if let Some(Sized(Bool, bitsize)) = ty.base_type() {
+                    (*bitsize != 8).then_some(Bool)
+                } else if let Some(Sized(UInt, _) | Unsized(UInt)) = ty.base_type() {
+                    Some(UInt)
+                } else {
+                    None
+                }
+            });
+
+        let fn_call = Expression::FnCall(fn_call);
+        match return_type_requires_conversion {
+            Some(Bool) => Ok(convert("into", fn_call)),
+            Some(UInt) => Ok(convert("as_unsigned", fn_call)),
+            _ => Ok(fn_call),
+        }
+    }
+}
+
+impl ToTokens for LLVMLink {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        assert!(
+            self.signature.is_some() && self.links.is_some(),
+            "expression {self:#?} was not built before calling to_tokens"
+        );
+
+        let signature = self.signature.as_ref().unwrap();
+        let links = self.links.as_ref().unwrap();
+        tokens.append_all(quote! {
+            extern "C" {
+                #(#links),*
+                #signature;
+            }
+        })
+    }
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum FunctionVisibility {
+    #[default]
+    Public,
+    Private,
+}
+
+/// Whether to generate a load/store test, and which typeset index
+/// represents the data type of the load/store target address
+#[derive(Clone, Debug, Default, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum Test {
+    #[default]
+    #[serde(skip)]
+    None, // Covered by `intrinsic-test`
+    Load(usize),
+    Store(usize),
+}
+
+impl Test {
+    pub fn get_typeset_index(&self) -> Option<usize> {
+        match *self {
+            Test::Load(n) => Some(n),
+            Test::Store(n) => Some(n),
+            _ => None,
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum Safety {
+    Safe,
+    Unsafe(Vec<UnsafetyComment>),
+}
+
+impl Safety {
+    /// Return `Ok(Safety::Safe)` if safety appears reasonable for the given `intrinsic`'s name and
+    /// prototype. Otherwise, return `Err()` with a suitable diagnostic.
+    fn safe_checked(intrinsic: &Intrinsic) -> Result<Self, String> {
+        let name = intrinsic.signature.doc_name();
+        if name.starts_with("sv") {
+            let handles_pointers = intrinsic
+                .signature
+                .arguments
+                .iter()
+                .any(|arg| matches!(arg.kind, TypeKind::Pointer(..)));
+            if name.starts_with("svld")
+                || name.starts_with("svst")
+                || name.starts_with("svprf")
+                || name.starts_with("svundef")
+                || handles_pointers
+            {
+                let doc = intrinsic.doc.as_ref().map(|s| s.to_string());
+                let doc = doc.as_deref().unwrap_or("...");
+                Err(format!(
+                    "`{name}` has no safety specification, but it looks like it should be unsafe. \
+                Consider specifying (un)safety explicitly:
+
+  - name: {name}
+    doc: {doc}
+    safety:
+      unsafe:
+        - ...
+    ...
+"
+                ))
+            } else {
+                Ok(Self::Safe)
+            }
+        } else {
+            Err(format!(
+                "Safety::safe_checked() for non-SVE intrinsic: {name}"
+            ))
+        }
+    }
+
+    fn is_safe(&self) -> bool {
+        match self {
+            Self::Safe => true,
+            Self::Unsafe(..) => false,
+        }
+    }
+
+    fn is_unsafe(&self) -> bool {
+        !self.is_safe()
+    }
+
+    fn has_doc_comments(&self) -> bool {
+        match self {
+            Self::Safe => false,
+            Self::Unsafe(v) => !v.is_empty(),
+        }
+    }
+
+    fn doc_comments(&self) -> &[UnsafetyComment] {
+        match self {
+            Self::Safe => &[],
+            Self::Unsafe(v) => v.as_slice(),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum UnsafetyComment {
+    Custom(String),
+    Uninitialized,
+    PointerOffset(GovernedBy),
+    PointerOffsetVnum(GovernedBy),
+    Dereference(GovernedBy),
+    UnpredictableOnFault,
+    NonTemporal,
+    NoProvenance(String),
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+#[serde(rename_all = "snake_case")]
+pub enum GovernedBy {
+    #[default]
+    Predicated,
+    PredicatedNonFaulting,
+    PredicatedFirstFaulting,
+}
+
+impl fmt::Display for GovernedBy {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Predicated => write!(f, " (governed by `pg`)"),
+            Self::PredicatedNonFaulting => write!(
+                f,
+                " (governed by `pg`, the first-fault register (`FFR`) \
+                and non-faulting behaviour)"
+            ),
+            Self::PredicatedFirstFaulting => write!(
+                f,
+                " (governed by `pg`, the first-fault register (`FFR`) \
+                and first-faulting behaviour)"
+            ),
+        }
+    }
+}
+
+impl fmt::Display for UnsafetyComment {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Custom(s) => s.fmt(f),
+            Self::Uninitialized => write!(
+                f,
+                "This creates an uninitialized value, and may be unsound (like \
+                [`core::mem::uninitialized`])."
+            ),
+            Self::PointerOffset(gov) => write!(
+                f,
+                "[`pointer::offset`](pointer#method.offset) safety constraints must \
+                be met for the address calculation for each active element{gov}."
+            ),
+            Self::PointerOffsetVnum(gov) => write!(
+                f,
+                "[`pointer::offset`](pointer#method.offset) safety constraints must \
+                be met for the address calculation for each active element{gov}. \
+                In particular, note that `vnum` is scaled by the vector \
+                length, `VL`, which is not known at compile time."
+            ),
+            Self::Dereference(gov) => write!(
+                f,
+                "This dereferences and accesses the calculated address for each \
+                active element{gov}."
+            ),
+            Self::NonTemporal => write!(
+                f,
+                "Non-temporal accesses have special memory ordering rules, and \
+                [explicit barriers may be required for some applications]\
+                (https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."
+            ),
+            Self::NoProvenance(arg) => write!(
+                f,
+                "Addresses passed in `{arg}` lack provenance, so this is similar to using a \
+                `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before \
+                using it."
+            ),
+            Self::UnpredictableOnFault => write!(
+                f,
+                "Result lanes corresponding to inactive FFR lanes (either before or as a result \
+                of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of \
+                predication. Refer to architectural documentation for details."
+            ),
+        }
+    }
+}
+
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct Intrinsic {
+    #[serde(default)]
+    pub visibility: FunctionVisibility,
+    #[serde(default)]
+    pub doc: Option<WildString>,
+    #[serde(flatten)]
+    pub signature: Signature,
+    /// Function sequential composition
+    pub compose: Vec<Expression>,
+    /// Input to generate the intrinsic against. Leave empty if the intrinsic
+    /// does not have any variants.
+    /// Each specific variant contains exactly one InputSet.
+    #[serde(flatten, default)]
+    pub input: IntrinsicInput,
+    #[serde(default)]
+    pub constraints: Vec<Constraint>,
+    /// Additional target features to add to the global settings
+    #[serde(default)]
+    pub target_features: Vec<String>,
+    /// Should the intrinsic be `unsafe`? By default, the generator will try to guess from the
+    /// prototype, but it errs on the side of `unsafe`, and prints a warning in that case.
+    #[serde(default)]
+    pub safety: Option<Safety>,
+    #[serde(default)]
+    pub substitutions: HashMap<String, SubstitutionType>,
+    /// The only typeset indices that require conversion to signed when deferring
+    /// unsigned intrinsics to signed ones (optional; by default all unsigned
+    /// types are converted to signed).
+    #[serde(default)]
+    pub defer_to_signed_only_indices: HashSet<usize>,
+    pub assert_instr: Vec<InstructionAssertionMethod>,
+    /// Whether we should generate a test for this intrinsic
+    #[serde(default)]
+    pub test: Test,
+    /// Primary base type, used for instruction assertion.
+    #[serde(skip)]
+    pub base_type: Option<BaseType>,
+}
+
+impl Intrinsic {
+    pub fn llvm_link(&self) -> Option<&LLVMLink> {
+        self.compose.iter().find_map(|ex| {
+            if let Expression::LLVMLink(llvm_link) = ex {
+                Some(llvm_link)
+            } else {
+                None
+            }
+        })
+    }
+
+    pub fn llvm_link_mut(&mut self) -> Option<&mut LLVMLink> {
+        self.compose.iter_mut().find_map(|ex| {
+            if let Expression::LLVMLink(llvm_link) = ex {
+                Some(llvm_link)
+            } else {
+                None
+            }
+        })
+    }
+
+    pub fn generate_variants(&self, global_ctx: &GlobalContext) -> context::Result<Vec<Intrinsic>> {
+        let wrap_err = |err| format!("{}: {err}", self.signature.name);
+
+        let mut group_ctx = GroupContext::default();
+        self.input
+            .variants(self)
+            .map_err(wrap_err)?
+            .map(|input| {
+                self.generate_variant(input.clone(), &mut group_ctx, global_ctx)
+                    .map_err(wrap_err)
+                    .map(|variant| (variant, input))
+            })
+            .collect::<context::Result<Vec<_>>>()
+            .and_then(|mut variants| {
+                variants.sort_by_cached_key(|(_, input)| input.to_owned());
+
+                if variants.is_empty() {
+                    let standalone_variant = self
+                        .generate_variant(InputSet::default(), &mut group_ctx, global_ctx)
+                        .map_err(wrap_err)?;
+
+                    Ok(vec![standalone_variant])
+                } else {
+                    Ok(variants
+                        .into_iter()
+                        .map(|(variant, _)| variant)
+                        .collect_vec())
+                }
+            })
+    }
+
+    pub fn generate_variant(
+        &self,
+        input: InputSet,
+        group_ctx: &mut GroupContext,
+        global_ctx: &GlobalContext,
+    ) -> context::Result<Intrinsic> {
+        let mut variant = self.clone();
+
+        variant.input.types = vec![InputSetEntry::new(vec![input.clone()])];
+
+        let mut local_ctx = LocalContext::new(input, self);
+        let mut ctx = Context {
+            local: &mut local_ctx,
+            group: group_ctx,
+            global: global_ctx,
+        };
+
+        variant.pre_build(&mut ctx)?;
+
+        match ctx.local.predicate_form().cloned() {
+            Some(PredicateForm::DontCare(method)) => {
+                variant.compose = variant.generate_dont_care_pass_through(&mut ctx, method)?
+            }
+            Some(PredicateForm::Zeroing(method)) => {
+                variant.compose = variant.generate_zeroing_pass_through(&mut ctx, method)?
+            }
+            _ => {
+                for idx in 0..variant.compose.len() {
+                    let mut ex = variant.compose[idx].clone();
+                    ex.build(&variant, &mut ctx)?;
+                    variant.compose[idx] = ex;
+                }
+            }
+        };
+
+        variant.post_build(&mut ctx)?;
+
+        if let Some(n_variant_op) = ctx.local.n_variant_op().cloned() {
+            variant.generate_n_variant(n_variant_op, &mut ctx)
+        } else {
+            Ok(variant)
+        }
+    }
+
+    /// Implement a "zeroing" (_z) method by calling an existing "merging" (_m) method, as required.
+    fn generate_zeroing_pass_through(
+        &mut self,
+        ctx: &mut Context,
+        method: ZeroingMethod,
+    ) -> context::Result<Vec<Expression>> {
+        PredicationMask::try_from(&ctx.local.signature.name)
+            .ok()
+            .filter(|mask| mask.has_merging())
+            .ok_or_else(|| format!("cannot generate zeroing passthrough for {}, no merging predicate form is specified", self.signature.name))?;
+
+        // Determine the function to pass through to.
+        let mut target_ctx = ctx.local.clone();
+        // Change target function predicate form to merging
+        *target_ctx.input.iter_mut()
+            .find_map(|arg| arg.predicate_form_mut())
+            .expect("failed to generate zeroing pass through, could not find predicate form in the InputSet") = PredicateForm::Merging;
+
+        let mut sig = target_ctx.signature.clone();
+        sig.build(&target_ctx)?;
+
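+        // Forward each argument of the merging form unchanged, except the one named by
+        // the zeroing method: a dropped operand becomes a zero initializer, a selected
+        // operand becomes a zero-selecting expression governed by the predicate.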
+        let args_as_expressions = |arg: &Argument| -> context::Result<Expression> {
+            let arg_name = arg.name.to_string();
+            match &method {
+                ZeroingMethod::Drop { drop } if arg_name == drop.to_string() => {
+                    Ok(PredicateForm::make_zeroinitializer(&arg.kind))
+                }
+                ZeroingMethod::Select { select } if arg_name == select.to_string() => {
+                    let pg = sig
+                        .arguments
+                        .iter()
+                        .find_map(|arg| match arg.kind.vector() {
+                            Some(ty) if ty.base_type().is_bool() => Some(arg.name.clone()),
+                            _ => None,
+                        })
+                        .ok_or_else(|| {
+                            format!("cannot generate zeroing passthrough for {}, no predicate found in the signature for zero selection", self.signature.name)
+                        })?;
+                    Ok(PredicateForm::make_zeroselector(
+                        pg,
+                        select.clone(),
+                        &arg.kind,
+                    ))
+                }
+                _ => Ok(arg.into()),
+            }
+        };
+
+        let name: Expression = sig.fn_name().into();
+        let args: Vec<Expression> = sig
+            .arguments
+            .iter()
+            .map(args_as_expressions)
+            .try_collect()?;
+        let statics: Vec<Expression> = sig
+            .static_defs
+            .iter()
+            .map(|sd| sd.try_into())
+            .try_collect()?;
+        let mut call: Expression = FnCall(Box::new(name), args, statics).into();
+        call.build(self, ctx)?;
+        Ok(vec![call])
+    }
+
+    /// Implement a "don't care" (_x) method by calling an existing "merging" (_m).
+    fn generate_dont_care_pass_through(
+        &mut self,
+        ctx: &mut Context,
+        method: DontCareMethod,
+    ) -> context::Result<Vec<Expression>> {
+        PredicationMask::try_from(&ctx.local.signature.name).and_then(|mask| match method {
+            DontCareMethod::AsMerging if mask.has_merging() => Ok(()),
+            DontCareMethod::AsZeroing if mask.has_zeroing() => Ok(()),
+            _ => Err(format!(
+                "cannot generate don't care passthrough for {}, no {} predicate form is specified",
+                self.signature.name,
+                match method {
+                    DontCareMethod::AsMerging => "merging",
+                    DontCareMethod::AsZeroing => "zeroing",
+                    _ => unreachable!(),
+                }
+            )),
+        })?;
+
+        // Determine the function to pass through to.
+        let mut target_ctx = ctx.local.clone();
+        // Change target function predicate form to merging
+        *target_ctx.input.iter_mut()
+            .find_map(|arg| arg.predicate_form_mut())
+            .expect("failed to generate don't care passthrough, could not find predicate form in the InputSet") = PredicateForm::Merging;
+
+        let mut sig = target_ctx.signature.clone();
+        sig.build(&target_ctx)?;
+
+        // We might need to drop an argument for a zeroing pass-through.
+        let drop = match (method, &self.input.predication_methods.zeroing_method) {
+            (DontCareMethod::AsZeroing, Some(ZeroingMethod::Drop { drop })) => Some(drop),
+            _ => None,
+        };
+
+        let name: Expression = sig.fn_name().into();
+        let args: Vec<Expression> = sig
+            .arguments
+            .iter()
+            .map(|arg| {
+                if Some(arg.name.to_string()) == drop.as_ref().map(|v| v.to_string()) {
+                    // This argument is present in the _m form, but missing from the _x form. Clang
+                    // typically replaces these with an uninitialised vector, but to avoid
+                    // materialising uninitialised values in Rust, we instead merge with a known
+                    // vector. This usually results in the same code generation.
+                    // TODO: In many cases, it'll be better to use an unpredicated (or zeroing) form.
+                    sig.arguments
+                        .iter()
+                        .filter(|&other| arg.name.to_string() != other.name.to_string())
+                        .find_map(|other| {
+                            arg.kind.express_reinterpretation_from(&other.kind, other)
+                        })
+                        .unwrap_or_else(|| PredicateForm::make_zeroinitializer(&arg.kind))
+                } else {
+                    arg.into()
+                }
+            })
+            .collect();
+        let statics: Vec<Expression> = sig
+            .static_defs
+            .iter()
+            .map(|sd| sd.try_into())
+            .try_collect()?;
+        let mut call: Expression = FnCall(Box::new(name), args, statics).into();
+        call.build(self, ctx)?;
+        Ok(vec![call])
+    }
+
+    /// Implement a "_n" variant based on the given operand
+    fn generate_n_variant(
+        &self,
+        mut n_variant_op: WildString,
+        ctx: &mut Context,
+    ) -> context::Result<Intrinsic> {
+        let mut variant = self.clone();
+
+        n_variant_op.build_acle(ctx.local)?;
+
+        let n_op_arg_idx = variant
+            .signature
+            .arguments
+            .iter_mut()
+            .position(|arg| arg.name.to_string() == n_variant_op.to_string())
+            .ok_or_else(|| {
+                format!(
+                    "cannot generate `_n` variant for {}, operand `{n_variant_op}` not found",
+                    variant.signature.name
+                )
+            })?;
+
+        let has_n_wildcard = ctx
+            .local
+            .signature
+            .name
+            .wildcards()
+            .any(|w| matches!(w, Wildcard::NVariant));
+
+        if !has_n_wildcard {
+            return Err(format!("cannot generate `_n` variant for {}, no wildcard {{_n}} was specified in the intrinsic's name", variant.signature.name));
+        }
+
+        // Build signature
+        variant.signature = ctx.local.signature.clone();
+        if let Some(pf) = ctx.local.predicate_form() {
+            // WARN: this may break in the future if the underlying implementation changes
+            // Drops unwanted arguments if needed (required for the collection of arguments to pass to the function)
+            pf.post_build(&mut variant)?;
+        }
+
+        let sig = &mut variant.signature;
+
+        ctx.local
+            .substitutions
+            .insert(Wildcard::NVariant, "_n".to_owned());
+
+        let arg_kind = &mut sig.arguments.get_mut(n_op_arg_idx).unwrap().kind;
+        *arg_kind = match arg_kind {
+            TypeKind::Wildcard(Wildcard::SVEType(idx, None)) => {
+                TypeKind::Wildcard(Wildcard::Type(*idx))
+            }
+            _ => {
+                return Err(format!(
+                "cannot generate `_n` variant for {}, the given operand is not a valid SVE type",
+                variant.signature.name
+            ))
+            }
+        };
+
+        sig.build(ctx.local)?;
+
+        // Build compose
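+        // The `_n` variant simply calls the base intrinsic, first splatting the scalar
+        // operand into a vector with the matching `svdup_n_{ty}` intrinsic.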
+        let name: Expression = self.signature.fn_name().into();
+        let args: Vec<Expression> = sig
+            .arguments
+            .iter()
+            .enumerate()
+            .map(|(idx, arg)| {
+                let ty = arg.kind.acle_notation_repr();
+                if idx == n_op_arg_idx {
+                    FnCall::new_expression(
+                        WildString::from(format!("svdup_n_{ty}")).into(),
+                        vec![arg.into()],
+                    )
+                } else {
+                    arg.into()
+                }
+            })
+            .collect();
+        let statics: Vec<Expression> = sig
+            .static_defs
+            .iter()
+            .map(|sd| sd.try_into())
+            .try_collect()?;
+        let mut call: Expression = FnCall(Box::new(name), args, statics).into();
+        call.build(self, ctx)?;
+
+        variant.compose = vec![call];
+        variant.signature.predicate_needs_conversion = true;
+
+        Ok(variant)
+    }
+
+    fn pre_build(&mut self, ctx: &mut Context) -> context::Result {
+        self.substitutions
+            .iter_mut()
+            .try_for_each(|(k, v)| -> context::Result {
+                let mut ws = v.get(ctx.local)?;
+                ws.build_acle(ctx.local)?;
+                ctx.local
+                    .substitutions
+                    .insert(Wildcard::Custom(k.to_owned()), ws.to_string());
+                Ok(())
+            })?;
+
+        self.signature.build(ctx.local)?;
+
+        if self.safety.is_none() {
+            self.safety = match Safety::safe_checked(self) {
+                Ok(safe) => Some(safe),
+                Err(err) => {
+                    eprintln!("{err}");
+                    return Err(format!(
+                        "Refusing to infer unsafety for {name}",
+                        name = self.signature.doc_name()
+                    ));
+                }
+            }
+        }
+
+        if let Some(doc) = &mut self.doc {
+            doc.build_acle(ctx.local)?
+        }
+
+        // Add arguments to variable tracking
+        self.signature
+            .arguments
+            .iter()
+            .for_each(|arg| arg.populate_variables(&mut ctx.local.variables));
+
+        // Add constant expressions to variable tracking
+        self.signature
+            .static_defs
+            .iter()
+            .filter_map(StaticDefinition::as_variable)
+            .for_each(|(var_name, var_properties)| {
+                ctx.local.variables.insert(var_name, var_properties);
+            });
+
+        // Pre-build compose expressions
+        for idx in 0..self.compose.len() {
+            let mut ex = self.compose[idx].clone();
+            ex.pre_build(ctx)?;
+            self.compose[idx] = ex;
+        }
+
+        if !ctx.local.input.is_empty() {
+            // We simplify the LLVM link transmute logic by deferring to a variant employing the same LLVM link where possible
+            if let Some(link) = self.compose.iter().find_map(|ex| match ex {
+                Expression::LLVMLink(link) => Some(link),
+                _ => None,
+            }) {
+                let mut link = link.clone();
+                link.build(ctx)?;
+
+                for cfg in ctx.global.arch_cfgs.iter() {
+                    let expected_link = link.resolve(cfg);
+                    if let Some(target_inputset) = ctx.group.links.get(&expected_link) {
+                        self.defer_to_existing_llvm_link(ctx.local, target_inputset)?;
+                        break;
+                    }
+                }
+            }
+        }
+
+        self.assert_instr
+            .iter_mut()
+            .try_for_each(|ai| ai.build(ctx))?;
+
+        // Prepend constraint assertions
+        self.constraints.iter_mut().try_for_each(|c| c.build(ctx))?;
+        let assertions: Vec<_> = self
+            .constraints
+            .iter()
+            .map(|c| ctx.local.make_assertion_from_constraint(c))
+            .try_collect()?;
+        self.compose.splice(0..0, assertions);
+
+        Ok(())
+    }
+
+    fn post_build(&mut self, ctx: &mut Context) -> context::Result {
+        if let Some(Expression::LLVMLink(link)) = self.compose.last() {
+            let mut fn_call = link.make_fn_call(&self.signature)?;
+            // Required to inject conversions
+            fn_call.build(self, ctx)?;
+            self.compose.push(fn_call)
+        }
+
+        if let Some(llvm_link) = self.llvm_link_mut() {
+            // Turn all Rust unsigned types into signed
+            llvm_link.sanitise_uints();
+        }
+
+        if let Some(predicate_form) = ctx.local.predicate_form() {
+            predicate_form.post_build(self)?
+        }
+
+        // Set for ToTokens<Signature> to display a generic svbool_t
+        self.signature.predicate_needs_conversion = true;
+
+        // Set base type kind for instruction assertion
+        self.base_type = ctx
+            .local
+            .input
+            .get(0)
+            .and_then(|arg| arg.typekind())
+            .and_then(|ty| ty.base_type())
+            .map(BaseType::clone);
+
+        // Add global target features
+        self.target_features = ctx
+            .global
+            .arch_cfgs
+            .iter()
+            .flat_map(|cfg| cfg.target_feature.clone())
+            .chain(self.target_features.clone())
+            .collect_vec();
+
+        Ok(())
+    }
+
+    fn defer_to_existing_llvm_link(
+        &mut self,
+        ctx: &LocalContext,
+        target_inputset: &InputSet,
+    ) -> context::Result {
+        let mut target_ctx = ctx.clone();
+        target_ctx.input = target_inputset.clone();
+
+        let mut target_signature = target_ctx.signature.clone();
+        target_signature.build(&target_ctx)?;
+
+        let drop_var = if let Some(pred) = ctx.predicate_form().cloned() {
+            match pred {
+                PredicateForm::Zeroing(ZeroingMethod::Drop { drop }) => Some(drop),
+                PredicateForm::DontCare(DontCareMethod::AsZeroing) => {
+                    if let Some(ZeroingMethod::Drop { drop }) =
+                        self.input.predication_methods.zeroing_method.to_owned()
+                    {
+                        Some(drop)
+                    } else {
+                        None
+                    }
+                }
+                _ => None,
+            }
+        } else {
+            None
+        };
+
+        let call_method =
+            |ex, method: &str| Expression::MethodCall(Box::new(ex), method.to_string(), vec![]);
+        let as_unsigned = |ex| call_method(ex, "as_unsigned");
+        let as_signed = |ex| call_method(ex, "as_signed");
+        let convert_if_required = |w: Option<&Wildcard>, from: &InputSet, to: &InputSet, ex| {
+            if let Some(w) = w {
+                if let Some(dest_idx) = w.get_typeset_index() {
+                    let from_type = from.get(dest_idx);
+                    let to_type = to.get(dest_idx);
+
+                    if from_type != to_type {
+                        let from_base_type = from_type
+                            .and_then(|in_arg| in_arg.typekind())
+                            .and_then(|ty| ty.base_type())
+                            .map(|bt| bt.kind());
+                        let to_base_type = to_type
+                            .and_then(|in_arg| in_arg.typekind())
+                            .and_then(|ty| ty.base_type())
+                            .map(|bt| bt.kind());
+
+                        match (from_base_type, to_base_type) {
+                            // Use AsSigned for uint -> int
+                            (Some(BaseTypeKind::UInt), Some(BaseTypeKind::Int)) => as_signed(ex),
+                            // Use AsUnsigned for int -> uint
+                            (Some(BaseTypeKind::Int), Some(BaseTypeKind::UInt)) => as_unsigned(ex),
+                            (None, None) => ex,
+                            _ => unreachable!("unsupported conversion case from {from_base_type:?} to {to_base_type:?} hit"),
+                        }
+                    } else {
+                        ex
+                    }
+                } else {
+                    ex
+                }
+            } else {
+                ex
+            }
+        };
+
+        let args = ctx
+            .signature
+            .arguments
+            .iter()
+            .filter_map(|arg| {
+                let var = Expression::Identifier(arg.name.to_owned(), IdentifierType::Variable);
+                if drop_var.as_ref().map(|v| v.to_string()) != Some(arg.name.to_string()) {
+                    Some(convert_if_required(
+                        arg.kind.wildcard(),
+                        &ctx.input,
+                        target_inputset,
+                        var,
+                    ))
+                } else {
+                    None
+                }
+            })
+            .collect_vec();
+
+        let turbofish = self
+            .signature
+            .static_defs
+            .iter()
+            .map(|def| {
+                let name = match def {
+                    StaticDefinition::Constant(Argument { name, .. }) => name.to_string(),
+                    StaticDefinition::Generic(name) => name.to_string(),
+                };
+                Expression::Identifier(name.into(), IdentifierType::Symbol)
+            })
+            .collect_vec();
+
+        let ret_wildcard = ctx
+            .signature
+            .return_type
+            .as_ref()
+            .and_then(|t| t.wildcard());
+        let call = FnCall(Box::new(target_signature.fn_name().into()), args, turbofish).into();
+
+        self.compose = vec![convert_if_required(
+            ret_wildcard,
+            target_inputset,
+            &ctx.input,
+            call,
+        )];
+
+        Ok(())
+    }
+}
+
+impl ToTokens for Intrinsic {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        let signature = &self.signature;
+        let fn_name = signature.fn_name().to_string();
+        let target_feature = self.target_features.join(",");
+        let safety = self
+            .safety
+            .as_ref()
+            .expect("safety should be determined during `pre_build`");
+
+        if let Some(doc) = &self.doc {
+            let mut doc = vec![doc.to_string()];
+
+            doc.push(String::new());
+            doc.push(format!("[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/{})", &signature.doc_name()));
+
+            if safety.has_doc_comments() {
+                doc.push(String::new());
+                doc.push("## Safety".to_string());
+                for comment in safety.doc_comments() {
+                    doc.push(format!("  * {comment}"));
+                }
+            } else {
+                assert!(
+                    safety.is_safe(),
+                    "{fn_name} is both public and unsafe, and so needs safety documentation"
+                );
+            }
+
+            tokens.append_all(quote! { #(#[doc = #doc])* });
+        } else {
+            assert!(
+                matches!(self.visibility, FunctionVisibility::Private),
+                "{fn_name} needs to be private, or to have documentation."
+            );
+            assert!(
+                !safety.has_doc_comments(),
+                "{fn_name} needs a documentation section for its safety comments."
+            );
+        }
+
+        tokens.append_all(quote! {
+            #[inline]
+            #[target_feature(enable = #target_feature)]
+        });
+
+        if !self.assert_instr.is_empty() {
+            InstructionAssertionsForBaseType(&self.assert_instr, &self.base_type.as_ref())
+                .to_tokens(tokens)
+        }
+
+        match &self.visibility {
+            FunctionVisibility::Public => tokens.append_all(quote! { pub }),
+            FunctionVisibility::Private => {}
+        }
+        if safety.is_unsafe() {
+            tokens.append_all(quote! { unsafe });
+        }
+        tokens.append_all(quote! { #signature });
+        tokens.append(Punct::new('{', Spacing::Alone));
+
+        let mut body_unsafe = false;
+        let mut expressions = self.compose.iter().peekable();
+        while let Some(ex) = expressions.next() {
+            if !body_unsafe && safety.is_safe() && ex.requires_unsafe_wrapper(&fn_name) {
+                body_unsafe = true;
+                tokens.append_all(quote! { unsafe });
+                tokens.append(Punct::new('{', Spacing::Alone));
+            }
+            // If it's not the last and not an LLVM link, add a trailing semicolon
+            if expressions.peek().is_some() && !matches!(ex, Expression::LLVMLink(_)) {
+                tokens.append_all(quote! { #ex; })
+            } else {
+                ex.to_tokens(tokens)
+            }
+        }
+        if body_unsafe {
+            tokens.append(Punct::new('}', Spacing::Alone));
+        }
+
+        tokens.append(Punct::new('}', Spacing::Alone));
+    }
+}
diff --git a/crates/stdarch-gen2/src/load_store_tests.rs b/crates/stdarch-gen2/src/load_store_tests.rs
new file mode 100644
index 0000000000..d697a8d22d
--- /dev/null
+++ b/crates/stdarch-gen2/src/load_store_tests.rs
@@ -0,0 +1,818 @@
+use std::fs::File;
+use std::io::Write;
+use std::path::PathBuf;
+use std::str::FromStr;
+
+use crate::format_code;
+use crate::input::InputType;
+use crate::intrinsic::Intrinsic;
+use crate::typekinds::BaseType;
+use crate::typekinds::{ToRepr, TypeKind};
+
+use itertools::Itertools;
+use lazy_static::lazy_static;
+use proc_macro2::TokenStream;
+use quote::{format_ident, quote};
+
+// Number of vectors in our buffers - the maximum tuple size, 4, plus 1 as we set the vnum
+// argument to 1.
+const NUM_VECS: usize = 5;
+// The maximum vector length (in bits)
+const VL_MAX_BITS: usize = 2048;
+// The maximum vector length (in bytes)
+const VL_MAX_BYTES: usize = VL_MAX_BITS / 8;
+// The maximum number of elements in each vector type
+const LEN_F32: usize = VL_MAX_BYTES / core::mem::size_of::<f32>();
+const LEN_F64: usize = VL_MAX_BYTES / core::mem::size_of::<f64>();
+const LEN_I8: usize = VL_MAX_BYTES / core::mem::size_of::<i8>();
+const LEN_I16: usize = VL_MAX_BYTES / core::mem::size_of::<i16>();
+const LEN_I32: usize = VL_MAX_BYTES / core::mem::size_of::<i32>();
+const LEN_I64: usize = VL_MAX_BYTES / core::mem::size_of::<i64>();
+const LEN_U8: usize = VL_MAX_BYTES / core::mem::size_of::<u8>();
+const LEN_U16: usize = VL_MAX_BYTES / core::mem::size_of::<u16>();
+const LEN_U32: usize = VL_MAX_BYTES / core::mem::size_of::<u32>();
+const LEN_U64: usize = VL_MAX_BYTES / core::mem::size_of::<u64>();
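+// For example, with VL_MAX_BITS = 2048: VL_MAX_BYTES = 2048 / 8 = 256, so LEN_F32 = 256 / 4 = 64
+// and the f32 data buffer below holds LEN_F32 * NUM_VECS = 320 elements.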
+
+/// `load_intrinsics` and `store_intrinsics` are vectors of intrinsic variants, while
+/// `out_path` is an optional file to write to (stdout is used when it is `None`).
+pub fn generate_load_store_tests(
+    load_intrinsics: Vec<Intrinsic>,
+    store_intrinsics: Vec<Intrinsic>,
+    out_path: Option<&PathBuf>,
+) -> Result<(), String> {
+    let output = match out_path {
+        Some(out) => {
+            Box::new(File::create(out).map_err(|e| format!("couldn't create tests file: {e}"))?)
+                as Box<dyn Write>
+        }
+        None => Box::new(std::io::stdout()) as Box<dyn Write>,
+    };
+    let mut used_stores = vec![false; store_intrinsics.len()];
+    let tests: Vec<_> = load_intrinsics
+        .iter()
+        .map(|load| {
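+            // Derive the matching store intrinsic's name from the load's: extending loads such
+            // as svld1sb/svld1ub map to the corresponding truncating stores (svst1b etc.), and
+            // gathers map to scatters.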
+            let store_candidate = load
+                .signature
+                .fn_name()
+                .to_string()
+                .replace("svld1s", "svst1")
+                .replace("svld1u", "svst1")
+                .replace("svldnt1s", "svstnt1")
+                .replace("svldnt1u", "svstnt1")
+                .replace("svld", "svst")
+                .replace("gather", "scatter");
+
+            let store_index = store_intrinsics
+                .iter()
+                .position(|i| i.signature.fn_name().to_string() == store_candidate);
+            if let Some(i) = store_index {
+                used_stores[i] = true;
+            }
+
+            generate_single_test(
+                load.clone(),
+                store_index.map(|i| store_intrinsics[i].clone()),
+            )
+        })
+        .try_collect()?;
+
+    assert!(used_stores.into_iter().all(|b| b), "Not all store tests have been paired with a load. Consider generating specific store-only tests");
+
+    let preamble =
+        TokenStream::from_str(&PREAMBLE).map_err(|e| format!("Preamble is invalid: {e}"))?;
+    // Only output manual tests for the SVE set
+    let manual_tests = match &load_intrinsics[0].target_features[..] {
+        [s] if s == "sve" => TokenStream::from_str(&MANUAL_TESTS)
+            .map_err(|e| format!("Manual tests are invalid: {e}"))?,
+        _ => quote!(),
+    };
+    format_code(
+        output,
+        format!(
+            "// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/sve` and run the following command to re-generate this
+// file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+{}",
+            quote! { #preamble #(#tests)* #manual_tests }
+        ),
+    )
+    .map_err(|e| format!("couldn't write tests: {e}"))
+}
+
+/// A test looks like this:
+/// ```
+///     let data = [scalable vector];
+///
+///     let mut storage = [0; N];
+///
+///     store_intrinsic([true_predicate], storage.as_mut_ptr(), data);
+///     [test contents of storage]
+///
+///     let loaded = load_intrinsic([true_predicate], storage.as_ptr());
+///     assert!(loaded == data);
+/// ```
+/// We initialise our data such that the value stored matches the index it's stored to.
+/// By doing this we can validate scatters by checking that each value in the storage
+/// array is either 0 or the same as its index.
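+//
+// As a rough illustration only (the actual names and element types come from the spec files;
+// this pairing is an assumed example), the test generated for svld1_s32 paired with svst1_s32
+// looks roughly like:
+//
+//     let mut storage = [0 as i32; 320];
+//     let data = svindex_s32(0.try_into().unwrap(), 1.try_into().unwrap());
+//     svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+//     for (i, &val) in storage.iter().enumerate() {
+//         assert!(val == 0 as i32 || val == i as i32);
+//     }
+//     svsetffr();
+//     let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+//     assert_vector_matches_i32(loaded, svindex_s32(0.try_into().unwrap(), 1.try_into().unwrap()));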
+fn generate_single_test(
+    load: Intrinsic,
+    store: Option<Intrinsic>,
+) -> Result<proc_macro2::TokenStream, String> {
+    let chars = LdIntrCharacteristics::new(&load)?;
+    let fn_name = load.signature.fn_name().to_string();
+
+    if let Some(ty) = &chars.gather_bases_type {
+        if ty.base_type().unwrap().get_size() == Ok(32)
+            && chars.gather_index_type.is_none()
+            && chars.gather_offset_type.is_none()
+        {
+            // We lack a way to ensure data is in the bottom 32 bits of the address space
+            println!("Skipping test for {fn_name}");
+            return Ok(quote!());
+        }
+    }
+
+    if fn_name.starts_with("svldff1") && fn_name.contains("gather") {
+        // TODO: We can remove this check when first-faulting gathers are fixed in CI's QEMU
+        // https://gitlab.com/qemu-project/qemu/-/issues/1612
+        println!("Skipping test for {fn_name}");
+        return Ok(quote!());
+    }
+
+    let fn_ident = format_ident!("{fn_name}");
+    let test_name = format_ident!(
+        "test_{fn_name}{}",
+        if let Some(ref store) = store {
+            format!("_with_{}", store.signature.fn_name())
+        } else {
+            String::new()
+        }
+    );
+
+    let load_type = &chars.load_type;
+    let acle_type = load_type.acle_notation_repr();
+
+    // If there's no return type, fall back to the load type for things that depend on it
+    let ret_type = &load
+        .signature
+        .return_type
+        .as_ref()
+        .and_then(TypeKind::base_type)
+        .unwrap_or(load_type);
+
+    let pred_fn = format_ident!("svptrue_b{}", load_type.size());
+
+    let load_type_caps = load_type.rust_repr().to_uppercase();
+    let data_array = format_ident!("{load_type_caps}_DATA");
+
+    let size_fn = format_ident!("svcnt{}", ret_type.size_literal());
+
+    let rust_ret_type = ret_type.rust_repr();
+    let assert_fn = format_ident!("assert_vector_matches_{rust_ret_type}");
+
+    // Use vnum=1, so adjust all values by one vector length
+    let (length_call, vnum_arg) = if chars.vnum {
+        if chars.is_prf {
+            (quote!(), quote!(, 1))
+        } else {
+            (quote!(let len = #size_fn() as usize;), quote!(, 1))
+        }
+    } else {
+        (quote!(), quote!())
+    };
+
+    let (bases_load, bases_arg) = if let Some(ty) = &chars.gather_bases_type {
+        // Bases is a vector of (sometimes 32-bit) pointers
+        // When we combine bases with an offset/index argument, we load from the data arrays
+        // starting at 1
+        let base_ty = ty.base_type().unwrap();
+        let rust_type = format_ident!("{}", base_ty.rust_repr());
+        let index_fn = format_ident!("svindex_{}", base_ty.acle_notation_repr());
+        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+
+        if base_ty.get_size().unwrap() == 32 {
+            // Treat bases as a vector of offsets here - we don't test this without an offset or
+            // index argument
+            (
+                Some(quote!(
+                    let bases = #index_fn(0, #size_in_bytes.try_into().unwrap());
+                )),
+                quote!(, bases),
+            )
+        } else {
+            // Treat bases as a vector of pointers
+            let base_fn = format_ident!("svdup_n_{}", base_ty.acle_notation_repr());
+            let data_array = if store.is_some() {
+                format_ident!("storage")
+            } else {
+                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+            };
+
+            let add_fn = format_ident!("svadd_{}_x", base_ty.acle_notation_repr());
+            (
+                Some(quote! {
+                    let bases = #base_fn(#data_array.as_ptr() as #rust_type);
+                    let offsets = #index_fn(0, #size_in_bytes.try_into().unwrap());
+                    let bases = #add_fn(#pred_fn(), bases, offsets);
+                }),
+                quote!(, bases),
+            )
+        }
+    } else {
+        (None, quote!())
+    };
+
+    let index_arg = if let Some(ty) = &chars.gather_index_type {
+        let rust_type = format_ident!("{}", ty.rust_repr());
+        if chars
+            .gather_bases_type
+            .as_ref()
+            .and_then(TypeKind::base_type)
+            .map_or(Err(String::new()), BaseType::get_size)
+            .unwrap()
+            == 32
+        {
+            // Let index be the base of the data array
+            let data_array = if store.is_some() {
+                format_ident!("storage")
+            } else {
+                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+            };
+            let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+            quote!(, #data_array.as_ptr() as #rust_type / (#size_in_bytes as #rust_type) + 1)
+        } else {
+            quote!(, 1.try_into().unwrap())
+        }
+    } else {
+        quote!()
+    };
+
+    let offset_arg = if let Some(ty) = &chars.gather_offset_type {
+        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+        if chars
+            .gather_bases_type
+            .as_ref()
+            .and_then(TypeKind::base_type)
+            .map_or(Err(String::new()), BaseType::get_size)
+            .unwrap()
+            == 32
+        {
+            // Let offset be the base of the data array
+            let rust_type = format_ident!("{}", ty.rust_repr());
+            let data_array = if store.is_some() {
+                format_ident!("storage")
+            } else {
+                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+            };
+            quote!(, #data_array.as_ptr() as #rust_type + #size_in_bytes as #rust_type)
+        } else {
+            quote!(, #size_in_bytes.try_into().unwrap())
+        }
+    } else {
+        quote!()
+    };
+
+    let (offsets_load, offsets_arg) = if let Some(ty) = &chars.gather_offsets_type {
+        // Offsets is a scalable vector of per-element offsets in bytes. We reuse the linear
+        // index sequence for this, multiplied by the element size to turn indices into byte offsets
+        let offsets_fn = format_ident!("svindex_{}", ty.base_type().unwrap().acle_notation_repr());
+        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+        (
+            Some(quote! {
+                let offsets = #offsets_fn(0, #size_in_bytes.try_into().unwrap());
+            }),
+            quote!(, offsets),
+        )
+    } else {
+        (None, quote!())
+    };
+
+    let (indices_load, indices_arg) = if let Some(ty) = &chars.gather_indices_type {
+        // There's no need to multiply indices by the load type width
+        let base_ty = ty.base_type().unwrap();
+        let indices_fn = format_ident!("svindex_{}", base_ty.acle_notation_repr());
+        (
+            Some(quote! {
+                let indices = #indices_fn(0, 1);
+            }),
+            quote! {, indices},
+        )
+    } else {
+        (None, quote!())
+    };
+
+    let ptr = if chars.gather_bases_type.is_some() {
+        quote!()
+    } else if chars.is_prf {
+        quote!(, I64_DATA.as_ptr())
+    } else {
+        quote!(, #data_array.as_ptr())
+    };
+
+    let tuple_len = &chars.tuple_len;
+    let expecteds = if chars.is_prf {
+        // No return value for prefetches
+        vec![]
+    } else {
+        (0..*tuple_len)
+            .map(|i| get_expected_range(i, &chars))
+            .collect()
+    };
+    let asserts: Vec<_> =
+        if *tuple_len > 1 {
+            let svget = format_ident!("svget{tuple_len}_{acle_type}");
+            expecteds.iter().enumerate().map(|(i, expected)| {
+            quote! (#assert_fn(#svget::<{ #i as i32 }>(loaded), #expected);)
+        }).collect()
+        } else {
+            expecteds
+                .iter()
+                .map(|expected| quote! (#assert_fn(loaded, #expected);))
+                .collect()
+        };
+
+    let function = if chars.is_prf {
+        if fn_name.contains("gather") && fn_name.contains("base") && !fn_name.starts_with("svprf_")
+        {
+            // svprf(b|h|w|d)_gather base intrinsics do not have a generic type parameter
+            quote!(#fn_ident::<{ svprfop::SV_PLDL1KEEP }>)
+        } else {
+            quote!(#fn_ident::<{ svprfop::SV_PLDL1KEEP }, i64>)
+        }
+    } else {
+        quote!(#fn_ident)
+    };
+
+    let octaword_guard = if chars.replicate_width == Some(256) {
+        let msg = format!("Skipping {test_name} due to SVE vector length");
+        quote! {
+            if svcntb() < 32 {
+                println!(#msg);
+                return;
+            }
+        }
+    } else {
+        quote!()
+    };
+
+    let feats = load.target_features.join(",");
+
+    if let Some(store) = store {
+        let data_init = if *tuple_len == 1 {
+            quote!(#(#expecteds)*)
+        } else {
+            let create = format_ident!("svcreate{tuple_len}_{acle_type}");
+            quote!(#create(#(#expecteds),*))
+        };
+        let input = store.input.types.get(0).unwrap().get(0).unwrap();
+        let store_type = input
+            .get(store.test.get_typeset_index().unwrap())
+            .and_then(InputType::typekind)
+            .and_then(TypeKind::base_type)
+            .unwrap();
+
+        let store_type = format_ident!("{}", store_type.rust_repr());
+        let storage_len = NUM_VECS * VL_MAX_BITS / chars.load_type.get_size()? as usize;
+        let store_fn = format_ident!("{}", store.signature.fn_name().to_string());
+        let load_type = format_ident!("{}", chars.load_type.rust_repr());
+        let (store_ptr, store_mut_ptr) = if chars.gather_bases_type.is_none() {
+            (
+                quote!(, storage.as_ptr() as *const #load_type),
+                quote!(, storage.as_mut_ptr()),
+            )
+        } else {
+            (quote!(), quote!())
+        };
+        let args = quote!(#pred_fn() #store_ptr #vnum_arg #bases_arg #offset_arg #index_arg #offsets_arg #indices_arg);
+        let call = if chars.uses_ffr {
+            // Doing a normal load first maximises the number of elements our ff/nf test loads
+            let non_ffr_fn_name = format_ident!(
+                "{}",
+                fn_name
+                    .replace("svldff1", "svld1")
+                    .replace("svldnf1", "svld1")
+            );
+            quote! {
+                svsetffr();
+                let _ = #non_ffr_fn_name(#args);
+                let loaded = #function(#args);
+            }
+        } else {
+            // Note that the FFR must be set for all tests as the assert functions mask against it
+            quote! {
+                svsetffr();
+                let loaded = #function(#args);
+            }
+        };
+
+        Ok(quote! {
+            #[simd_test(enable = #feats)]
+            unsafe fn #test_name() {
+                #octaword_guard
+                #length_call
+                let mut storage = [0 as #store_type; #storage_len];
+                let data = #data_init;
+                #bases_load
+                #offsets_load
+                #indices_load
+
+                #store_fn(#pred_fn() #store_mut_ptr #vnum_arg #bases_arg #offset_arg #index_arg #offsets_arg #indices_arg, data);
+                for (i, &val) in storage.iter().enumerate() {
+                    assert!(val == 0 as #store_type || val == i as #store_type);
+                }
+
+                #call
+                #(#asserts)*
+
+            }
+        })
+    } else {
+        let args = quote!(#pred_fn() #ptr #vnum_arg #bases_arg #offset_arg #index_arg #offsets_arg #indices_arg);
+        let call = if chars.uses_ffr {
+            // Doing a normal load first maximises the number of elements our ff/nf test loads
+            let non_ffr_fn_name = format_ident!(
+                "{}",
+                fn_name
+                    .replace("svldff1", "svld1")
+                    .replace("svldnf1", "svld1")
+            );
+            quote! {
+                svsetffr();
+                let _ = #non_ffr_fn_name(#args);
+                let loaded = #function(#args);
+            }
+        } else {
+            // Note that the FFR must be set for all tests as the assert functions mask against it
+            quote! {
+                svsetffr();
+                let loaded = #function(#args);
+            }
+        };
+        Ok(quote! {
+            #[simd_test(enable = #feats)]
+            unsafe fn #test_name() {
+                #octaword_guard
+                #bases_load
+                #offsets_load
+                #indices_load
+                #call
+                #length_call
+
+                #(#asserts)*
+            }
+        })
+    }
+}
+
+/// Assumes chars.ret_type is not None
+fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_macro2::TokenStream {
+    // vnum=1
+    let vnum_adjust = if chars.vnum { quote!(len+) } else { quote!() };
+
+    let bases_adjust =
+        (chars.gather_index_type.is_some() || chars.gather_offset_type.is_some()) as usize;
+
+    let tuple_len = chars.tuple_len;
+    let size = chars
+        .ret_type
+        .as_ref()
+        .and_then(TypeKind::base_type)
+        .unwrap_or(&chars.load_type)
+        .get_size()
+        .unwrap() as usize;
+
+    if chars.replicate_width == Some(128) {
+        // svld1rq
+        let ty_rust = format_ident!(
+            "{}",
+            chars
+                .ret_type
+                .as_ref()
+                .unwrap()
+                .base_type()
+                .unwrap()
+                .rust_repr()
+        );
+        let args: Vec<_> = (0..(128 / size)).map(|i| quote!(#i as #ty_rust)).collect();
+        let dup = format_ident!(
+            "svdupq_n_{}",
+            chars.ret_type.as_ref().unwrap().acle_notation_repr()
+        );
+        quote!(#dup(#(#args,)*))
+    } else if chars.replicate_width == Some(256) {
+        // svld1ro - we use two interleaved svdups to create a repeating 256-bit pattern
+        let ty_rust = format_ident!(
+            "{}",
+            chars
+                .ret_type
+                .as_ref()
+                .unwrap()
+                .base_type()
+                .unwrap()
+                .rust_repr()
+        );
+        let ret_acle = chars.ret_type.as_ref().unwrap().acle_notation_repr();
+        let args: Vec<_> = (0..(128 / size)).map(|i| quote!(#i as #ty_rust)).collect();
+        let args2: Vec<_> = ((128 / size)..(256 / size))
+            .map(|i| quote!(#i as #ty_rust))
+            .collect();
+        let dup = format_ident!("svdupq_n_{ret_acle}");
+        let interleave = format_ident!("svtrn1q_{ret_acle}");
+        quote!(#interleave(#dup(#(#args,)*), #dup(#(#args2,)*)))
+    } else {
+        let start = bases_adjust + tuple_idx;
+        if chars
+            .ret_type
+            .as_ref()
+            .unwrap()
+            .base_type()
+            .unwrap()
+            .is_float()
+        {
+            // Use svcvt to create a linear sequence of floats
+            let cvt_fn = format_ident!("svcvt_f{size}_s{size}_x");
+            let pred_fn = format_ident!("svptrue_b{size}");
+            let svindex_fn = format_ident!("svindex_s{size}");
+            quote! { #cvt_fn(#pred_fn(), #svindex_fn((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))}
+        } else {
+            let ret_acle = chars.ret_type.as_ref().unwrap().acle_notation_repr();
+            let svindex = format_ident!("svindex_{ret_acle}");
+            quote!(#svindex((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))
+        }
+    }
+}
+
+struct LdIntrCharacteristics {
+    // The data type to load from (not necessarily the data type returned)
+    load_type: BaseType,
+    // The data type to return (None for unit)
+    ret_type: Option<TypeKind>,
+    // The size of tuple to load/store
+    tuple_len: usize,
+    // Whether a vnum argument is present
+    vnum: bool,
+    // Is the intrinsic first-faulting or non-faulting?
+    uses_ffr: bool,
+    // Is it a prefetch?
+    is_prf: bool,
+    // The size (in bits) of the block replicated by svld1ro/svld1rq intrinsics
+    replicate_width: Option<usize>,
+    // Scalable vector of pointers to load from
+    gather_bases_type: Option<TypeKind>,
+    // Scalar offset, paired with bases
+    gather_offset_type: Option<TypeKind>,
+    // Scalar index, paired with bases
+    gather_index_type: Option<TypeKind>,
+    // Scalable vector of offsets
+    gather_offsets_type: Option<TypeKind>,
+    // Scalable vector of indices
+    gather_indices_type: Option<TypeKind>,
+}
+
+impl LdIntrCharacteristics {
+    fn new(intr: &Intrinsic) -> Result<LdIntrCharacteristics, String> {
+        let input = intr.input.types.get(0).unwrap().get(0).unwrap();
+        let load_type = input
+            .get(intr.test.get_typeset_index().unwrap())
+            .and_then(InputType::typekind)
+            .and_then(TypeKind::base_type)
+            .unwrap();
+
+        let ret_type = intr.signature.return_type.clone();
+
+        let name = intr.signature.fn_name().to_string();
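+        // The first digit in the intrinsic name gives the tuple size (e.g. svld3 loads a tuple
+        // of three vectors); names without any digit, such as the prefetches, default to 1.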
+        let tuple_len = name
+            .chars()
+            .find(|c| c.is_numeric())
+            .and_then(|c| c.to_digit(10))
+            .unwrap_or(1) as usize;
+
+        let uses_ffr = name.starts_with("svldff") || name.starts_with("svldnf");
+
+        let is_prf = name.starts_with("svprf");
+
+        let replicate_width = if name.starts_with("svld1ro") {
+            Some(256)
+        } else if name.starts_with("svld1rq") {
+            Some(128)
+        } else {
+            None
+        };
+
+        let get_ty_of_arg = |name: &str| {
+            intr.signature
+                .arguments
+                .iter()
+                .find(|a| a.name.to_string() == name)
+                .map(|a| a.kind.clone())
+        };
+
+        let gather_bases_type = get_ty_of_arg("bases");
+        let gather_offset_type = get_ty_of_arg("offset");
+        let gather_index_type = get_ty_of_arg("index");
+        let gather_offsets_type = get_ty_of_arg("offsets");
+        let gather_indices_type = get_ty_of_arg("indices");
+
+        Ok(LdIntrCharacteristics {
+            load_type: *load_type,
+            ret_type,
+            tuple_len,
+            vnum: name.contains("vnum"),
+            uses_ffr,
+            is_prf,
+            replicate_width,
+            gather_bases_type,
+            gather_offset_type,
+            gather_index_type,
+            gather_offsets_type,
+            gather_indices_type,
+        })
+    }
+}
+
+lazy_static! {
+    static ref PREAMBLE: String = format!(
+        r#"#![allow(unused)]
+
+use super::*;
+use std::boxed::Box;
+use std::convert::{{TryFrom, TryInto}};
+use std::sync::LazyLock;
+use std::vec::Vec;
+use stdarch_test::simd_test;
+
+static F32_DATA: LazyLock<[f32; {LEN_F32} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_F32} * {NUM_VECS})
+        .map(|i| i as f32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f32 data incorrectly initialised")
+}});
+static F64_DATA: LazyLock<[f64; {LEN_F64} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_F64} * {NUM_VECS})
+        .map(|i| i as f64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f64 data incorrectly initialised")
+}});
+static I8_DATA: LazyLock<[i8; {LEN_I8} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_I8} * {NUM_VECS})
+        .map(|i| ((i + 128) % 256 - 128) as i8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i8 data incorrectly initialised")
+}});
+static I16_DATA: LazyLock<[i16; {LEN_I16} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_I16} * {NUM_VECS})
+        .map(|i| i as i16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i16 data incorrectly initialised")
+}});
+static I32_DATA: LazyLock<[i32; {LEN_I32} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_I32} * {NUM_VECS})
+        .map(|i| i as i32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i32 data incorrectly initialised")
+}});
+static I64_DATA: LazyLock<[i64; {LEN_I64} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_I64} * {NUM_VECS})
+        .map(|i| i as i64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i64 data incorrectly initialised")
+}});
+static U8_DATA: LazyLock<[u8; {LEN_U8} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_U8} * {NUM_VECS})
+        .map(|i| i as u8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u8 data incorrectly initialised")
+}});
+static U16_DATA: LazyLock<[u16; {LEN_U16} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_U16} * {NUM_VECS})
+        .map(|i| i as u16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u16 data incorrectly initialised")
+}});
+static U32_DATA: LazyLock<[u32; {LEN_U32} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_U32} * {NUM_VECS})
+        .map(|i| i as u32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u32 data incorrectly initialised")
+}});
+static U64_DATA: LazyLock<[u64; {LEN_U64} * {NUM_VECS}]> = LazyLock::new(|| {{
+    (0..{LEN_U64} * {NUM_VECS})
+        .map(|i| i as u64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u64 data incorrectly initialised")
+}});
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_f32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_f64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_s8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_s16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_s32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_s64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_u8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_u16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_u32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {{
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_u64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}}
+"#
+    );
+}
+
+lazy_static! {
+    static ref MANUAL_TESTS: String = format!(
+        "#[simd_test(enable = \"sve\")]
+unsafe fn test_ffr() {{
+    svsetffr();
+    let ffr = svrdffr();
+    assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0));
+    let pred = svdupq_n_b8(true, false, true, false, true, false, true, false,
+                           true, false, true, false, true, false, true, false);
+    svwrffr(pred);
+    let ffr = svrdffr_z(svptrue_b8());
+    assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1));
+}}
+"
+    );
+}
diff --git a/crates/stdarch-gen2/src/main.rs b/crates/stdarch-gen2/src/main.rs
new file mode 100644
index 0000000000..5379d18404
--- /dev/null
+++ b/crates/stdarch-gen2/src/main.rs
@@ -0,0 +1,273 @@
+#![feature(pattern)]
+
+mod assert_instr;
+mod context;
+mod expression;
+mod input;
+mod intrinsic;
+mod load_store_tests;
+mod matching;
+mod predicate_forms;
+mod typekinds;
+mod wildcards;
+mod wildstring;
+
+use intrinsic::Test;
+use itertools::Itertools;
+use quote::quote;
+use std::fs::File;
+use std::io::Write;
+use std::path::{Path, PathBuf};
+use std::process::{Command, Stdio};
+use walkdir::WalkDir;
+
+fn main() -> Result<(), String> {
+    parse_args()
+        .into_iter()
+        .map(|(filepath, out)| {
+            File::open(&filepath)
+                .map(|f| (f, filepath, out))
+                .map_err(|e| format!("could not read input file: {e}"))
+        })
+        .map(|res| {
+            let (file, filepath, out) = res?;
+            serde_yaml::from_reader(file)
+                .map(|input: input::GeneratorInput| (input, filepath, out))
+                .map_err(|e| format!("could not parse input file: {e}"))
+        })
+        .collect::<Result<Vec<_>, _>>()?
+        .into_iter()
+        .map(|(input, filepath, out)| {
+            let intrinsics = input.intrinsics.into_iter()
+                .map(|intrinsic| intrinsic.generate_variants(&input.ctx))
+                .try_collect()
+                .map(|mut vv: Vec<_>| {
+                    vv.sort_by_cached_key(|variants| {
+                        variants.first().map_or_else(String::default, |variant| {
+                            variant.signature.fn_name().to_string()
+                        })
+                    });
+                    vv.into_iter().flatten().collect_vec()
+                })?;
+
+            let loads = intrinsics.iter()
+                .filter_map(|i| {
+                    if matches!(i.test, Test::Load(..)) {
+                        Some(i.clone())
+                    } else {
+                        None
+                    }
+                }).collect();
+            let stores = intrinsics.iter()
+                .filter_map(|i| {
+                    if matches!(i.test, Test::Store(..)) {
+                        Some(i.clone())
+                    } else {
+                        None
+                    }
+                }).collect();
+            load_store_tests::generate_load_store_tests(loads, stores, out.as_ref().map(|o| make_tests_filepath(&filepath, o)).as_ref())?;
+            Ok((
+                input::GeneratorInput {
+                    intrinsics,
+                    ctx: input.ctx,
+                },
+                filepath,
+                out,
+            ))
+        })
+        .try_for_each(
+            |result: context::Result<(input::GeneratorInput, PathBuf, Option<PathBuf>)>| -> context::Result {
+                let (generated, filepath, out) = result?;
+
+                let w = match out {
+                    Some(out) => Box::new(
+                        File::create(make_output_filepath(&filepath, &out))
+                            .map_err(|e| format!("could not create output file: {e}"))?,
+                    ) as Box<dyn Write>,
+                    None => Box::new(std::io::stdout()) as Box<dyn Write>,
+                };
+
+                generate_file(generated, w)
+                    .map_err(|e| format!("could not generate output file: {e}"))
+            },
+        )
+}
+
+fn parse_args() -> Vec<(PathBuf, Option<PathBuf>)> {
+    let mut args_it = std::env::args().skip(1);
+    assert!(
+        1 <= args_it.len() && args_it.len() <= 2,
+        "Usage: cargo run -p stdarch-gen2 -- INPUT_DIR [OUTPUT_DIR]"
+    );
+
+    let in_path = Path::new(args_it.next().unwrap().as_str()).to_path_buf();
+    assert!(
+        in_path.exists() && in_path.is_dir(),
+        "invalid path {in_path:#?} given"
+    );
+
+    let out_dir = if let Some(dir) = args_it.next() {
+        let out_path = Path::new(dir.as_str()).to_path_buf();
+        assert!(
+            out_path.exists() && out_path.is_dir(),
+            "invalid path {out_path:#?} given"
+        );
+        Some(out_path)
+    } else {
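+        // With no output directory given, fall back to the in-tree aarch64 sources relative to
+        // the built binary; if that path does not exist, the generated code goes to stdout.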
+        std::env::current_exe()
+            .map(|mut f| {
+                f.pop();
+                f.push("../../crates/core_arch/src/aarch64/");
+                f.exists().then_some(f)
+            })
+            .ok()
+            .flatten()
+    };
+
+    WalkDir::new(in_path)
+        .into_iter()
+        .filter_map(Result::ok)
+        .filter(|f| f.file_type().is_file())
+        .map(|f| (f.into_path(), out_dir.clone()))
+        .collect()
+}
+
+fn generate_file(
+    generated_input: input::GeneratorInput,
+    mut out: Box<dyn Write>,
+) -> std::io::Result<()> {
+    write!(
+        out,
+        r#"// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+#![allow(improper_ctypes)]
+
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+use super::*;{uses_neon}
+
+"#,
+        uses_neon = generated_input
+            .ctx
+            .uses_neon_types
+            .then_some("\nuse crate::core_arch::arch::aarch64::*;")
+            .unwrap_or_default(),
+    )?;
+    let intrinsics = generated_input.intrinsics;
+    format_code(out, quote! { #(#intrinsics)* })?;
+    Ok(())
+}
+
+pub fn format_code(
+    mut output: impl std::io::Write,
+    input: impl std::fmt::Display,
+) -> std::io::Result<()> {
+    let proc = Command::new("rustfmt")
+        .stdin(Stdio::piped())
+        .stdout(Stdio::piped())
+        .spawn()?;
+    write!(proc.stdin.as_ref().unwrap(), "{input}")?;
+    output.write_all(proc.wait_with_output()?.stdout.as_slice())
+}
+
+/// Derive an output file name from an input file and an output directory.
+///
+/// The name is formed by:
+///
+///   - ... taking in_filepath.file_name() (dropping all directory components),
+///   - ... dropping a .yml or .yaml extension (if present),
+///   - ... then dropping a .spec extension (if present).
+///
+/// The result is placed under out_dirpath, keeping the input file's immediate parent directory.
+///
+/// Panics if the resulting name is empty, or if file_name() is not UTF-8.
+fn make_output_filepath(in_filepath: &Path, out_dirpath: &Path) -> PathBuf {
+    make_filepath(in_filepath, out_dirpath, |name: &str| format!("{name}.rs"))
+}
+
+fn make_tests_filepath(in_filepath: &Path, out_dirpath: &Path) -> PathBuf {
+    make_filepath(in_filepath, out_dirpath, |name: &str| {
+        format!("ld_st_tests_{name}.rs")
+    })
+}
+
+fn make_filepath<F: FnOnce(&str) -> String>(
+    in_filepath: &Path,
+    out_dirpath: &Path,
+    name_formatter: F,
+) -> PathBuf {
+    let mut parts = in_filepath.iter();
+    let name = parts
+        .next_back()
+        .and_then(|f| f.to_str())
+        .expect("Inputs must have valid, UTF-8 file_name()");
+    let dir = parts.next_back().unwrap();
+
+    let name = name
+        .trim_end_matches(".yml")
+        .trim_end_matches(".yaml")
+        .trim_end_matches(".spec");
+    assert!(!name.is_empty());
+
+    let mut output = out_dirpath.to_path_buf();
+    output.push(dir);
+    output.push(name_formatter(name));
+    output
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn infer_output_file() {
+        macro_rules! t {
+            ($src:expr, $outdir:expr, $dst:expr) => {
+                let src: PathBuf = $src.iter().collect();
+                let outdir: PathBuf = $outdir.iter().collect();
+                let dst: PathBuf = $dst.iter().collect();
+                assert_eq!(make_output_filepath(&src, &outdir), dst);
+            };
+        }
+        // Documented usage.
+        t!(["x", "NAME.spec.yml"], [""], ["x", "NAME.rs"]);
+        t!(
+            ["x", "NAME.spec.yml"],
+            ["a", "b"],
+            ["a", "b", "x", "NAME.rs"]
+        );
+        t!(
+            ["x", "y", "NAME.spec.yml"],
+            ["out"],
+            ["out", "y", "NAME.rs"]
+        );
+        t!(["x", "NAME.spec.yaml"], ["out"], ["out", "x", "NAME.rs"]);
+        t!(["x", "NAME.spec"], ["out"], ["out", "x", "NAME.rs"]);
+        t!(["x", "NAME.yml"], ["out"], ["out", "x", "NAME.rs"]);
+        t!(["x", "NAME.yaml"], ["out"], ["out", "x", "NAME.rs"]);
+        // Unrecognised extensions get treated as part of the stem.
+        t!(
+            ["x", "NAME.spac.yml"],
+            ["out"],
+            ["out", "x", "NAME.spac.rs"]
+        );
+        t!(["x", "NAME.txt"], ["out"], ["out", "x", "NAME.txt.rs"]);
+        // Only the immediate parent directory of the input file is kept in the output path
+        t!(
+            ["x", "y", "z", "NAME.spec.yml"],
+            ["out"],
+            ["out", "z", "NAME.rs"]
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn infer_output_file_no_stem() {
+        make_output_filepath(Path::new(".spec.yml"), Path::new(""));
+    }
+}
diff --git a/crates/stdarch-gen2/src/matching.rs b/crates/stdarch-gen2/src/matching.rs
new file mode 100644
index 0000000000..0c48062042
--- /dev/null
+++ b/crates/stdarch-gen2/src/matching.rs
@@ -0,0 +1,170 @@
+use proc_macro2::TokenStream;
+use quote::ToTokens;
+use serde::{Deserialize, Serialize};
+use std::fmt;
+
+use crate::context::{self, LocalContext};
+use crate::typekinds::{BaseType, BaseTypeKind, TypeKind};
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
+pub struct MatchSizeValues<T> {
+    pub default: T,
+    pub byte: Option<T>,
+    pub halfword: Option<T>,
+    pub doubleword: Option<T>,
+}
+
+impl<T> MatchSizeValues<T> {
+    pub fn get(&mut self, ty: &TypeKind, ctx: &LocalContext) -> context::Result<&T> {
+        let base_ty = if let Some(w) = ty.wildcard() {
+            ctx.provide_type_wildcard(w)?
+        } else {
+            ty.clone()
+        };
+
+        if let BaseType::Sized(_, bitsize) = base_ty.base_type().unwrap() {
+            match (bitsize, &self.byte, &self.halfword, &self.doubleword) {
+                (64, _, _, Some(v)) | (16, _, Some(v), _) | (8, Some(v), _, _) => Ok(v),
+                _ => Ok(&self.default),
+            }
+        } else {
+            Err(format!("cannot match bitsize to unsized type {ty:?}!"))
+        }
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(deny_unknown_fields)]
+pub struct MatchKindValues<T> {
+    pub default: T,
+    pub float: Option<T>,
+    pub unsigned: Option<T>,
+}
+
+impl<T> MatchKindValues<T> {
+    pub fn get(&mut self, ty: &TypeKind, ctx: &LocalContext) -> context::Result<&T> {
+        let base_ty = if let Some(w) = ty.wildcard() {
+            ctx.provide_type_wildcard(w)?
+        } else {
+            ty.clone()
+        };
+
+        match (
+            base_ty.base_type().unwrap().kind(),
+            &self.float,
+            &self.unsigned,
+        ) {
+            (BaseTypeKind::Float, Some(v), _) | (BaseTypeKind::UInt, _, Some(v)) => Ok(v),
+            _ => Ok(&self.default),
+        }
+    }
+}
+
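+// With `#[serde(untagged)]`, a `SizeMatchable<T>` value in a spec file is either a plain `T`
+// (already matched) or a mapping that picks a value by the bit size of `match_size`. A sketch of
+// the mapping shape (field names from `MatchSizeValues`; the values are placeholders only):
+//
+//     match_size: <type or type wildcard>
+//     default:    <used for 32-bit and for any size without a specific entry>
+//     byte:       <optional, used for 8-bit>
+//     halfword:   <optional, used for 16-bit>
+//     doubleword: <optional, used for 64-bit>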
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged, deny_unknown_fields)]
+pub enum SizeMatchable<T> {
+    Matched(T),
+    Unmatched {
+        match_size: Option<TypeKind>,
+        #[serde(flatten)]
+        values: MatchSizeValues<Box<T>>,
+    },
+}
+
+impl<T: Clone> SizeMatchable<T> {
+    pub fn perform_match(&mut self, ctx: &LocalContext) -> context::Result {
+        match self {
+            Self::Unmatched {
+                match_size: None,
+                values: MatchSizeValues { default, .. },
+            } => *self = Self::Matched(*default.to_owned()),
+            Self::Unmatched {
+                match_size: Some(ty),
+                values,
+            } => *self = Self::Matched(*values.get(ty, ctx)?.to_owned()),
+            _ => {}
+        }
+        Ok(())
+    }
+}
+
+impl<T: fmt::Debug> AsRef<T> for SizeMatchable<T> {
+    fn as_ref(&self) -> &T {
+        if let SizeMatchable::Matched(v) = self {
+            v
+        } else {
+            panic!("no match for {self:?} was performed");
+        }
+    }
+}
+
+impl<T: fmt::Debug> AsMut<T> for SizeMatchable<T> {
+    fn as_mut(&mut self) -> &mut T {
+        if let SizeMatchable::Matched(v) = self {
+            v
+        } else {
+            panic!("no match for {self:?} was performed");
+        }
+    }
+}
+
+impl<T: fmt::Debug + ToTokens> ToTokens for SizeMatchable<T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.as_ref().to_tokens(tokens)
+    }
+}
+
+#[derive(Debug, Clone, Serialize, Deserialize)]
+#[serde(untagged, deny_unknown_fields)]
+pub enum KindMatchable<T> {
+    Matched(T),
+    Unmatched {
+        match_kind: Option<TypeKind>,
+        #[serde(flatten)]
+        values: MatchKindValues<Box<T>>,
+    },
+}
+
+impl<T: Clone> KindMatchable<T> {
+    pub fn perform_match(&mut self, ctx: &LocalContext) -> context::Result {
+        match self {
+            Self::Unmatched {
+                match_kind: None,
+                values: MatchKindValues { default, .. },
+            } => *self = Self::Matched(*default.to_owned()),
+            Self::Unmatched {
+                match_kind: Some(ty),
+                values,
+            } => *self = Self::Matched(*values.get(ty, ctx)?.to_owned()),
+            _ => {}
+        }
+        Ok(())
+    }
+}
+
+impl<T: fmt::Debug> AsRef<T> for KindMatchable<T> {
+    fn as_ref(&self) -> &T {
+        if let KindMatchable::Matched(v) = self {
+            v
+        } else {
+            panic!("no match for {self:?} was performed");
+        }
+    }
+}
+
+impl<T: fmt::Debug> AsMut<T> for KindMatchable<T> {
+    fn as_mut(&mut self) -> &mut T {
+        if let KindMatchable::Matched(v) = self {
+            v
+        } else {
+            panic!("no match for {self:?} was performed");
+        }
+    }
+}
+
+impl<T: fmt::Debug + ToTokens> ToTokens for KindMatchable<T> {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        self.as_ref().to_tokens(tokens)
+    }
+}
diff --git a/crates/stdarch-gen2/src/predicate_forms.rs b/crates/stdarch-gen2/src/predicate_forms.rs
new file mode 100644
index 0000000000..02789bf7eb
--- /dev/null
+++ b/crates/stdarch-gen2/src/predicate_forms.rs
@@ -0,0 +1,249 @@
+use serde::{Deserialize, Serialize};
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use std::fmt;
+use std::str::FromStr;
+
+use crate::context;
+use crate::expression::{Expression, FnCall, IdentifierType};
+use crate::intrinsic::Intrinsic;
+use crate::typekinds::{ToRepr, TypeKind};
+use crate::wildcards::Wildcard;
+use crate::wildstring::WildString;
+
+const ZEROING_SUFFIX: &str = "_z";
+const MERGING_SUFFIX: &str = "_m";
+const DONT_CARE_SUFFIX: &str = "_x";
+
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[serde(untagged)]
+pub enum ZeroingMethod {
+    /// Drop the specified argument and replace it with a zeroinitializer
+    Drop { drop: WildString },
+    /// Apply zero selection to the specified variable when zeroing
+    Select { select: WildString },
+}
+
+impl PartialOrd for ZeroingMethod {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for ZeroingMethod {
+    fn cmp(&self, _: &Self) -> std::cmp::Ordering {
+        std::cmp::Ordering::Equal
+    }
+}
+
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
+pub enum DontCareMethod {
+    #[default]
+    Inferred,
+    AsZeroing,
+    AsMerging,
+}
+
+#[derive(Debug, Clone, Default, PartialEq, Eq, Deserialize, Serialize)]
+pub struct PredicationMethods {
+    /// Zeroing method, if the zeroing predicate form is used
+    #[serde(default)]
+    pub zeroing_method: Option<ZeroingMethod>,
+    /// Don't care method, if the don't care predicate form is used
+    #[serde(default)]
+    pub dont_care_method: DontCareMethod,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
+pub enum PredicateForm {
+    /// Enables merging predicate form
+    Merging,
+    /// Enables "don't care" predicate form.
+    DontCare(DontCareMethod),
+    /// Enables zeroing predicate form. If LLVM zero-selection is performed, set the
+    /// `select` field to the variable that gets selected. Otherwise, set the `drop` field
+    /// to the argument that is dropped and replaced with a zeroinitializer.
+    Zeroing(ZeroingMethod),
+}
+
+impl PredicateForm {
+    pub fn get_suffix(&self) -> &'static str {
+        match self {
+            PredicateForm::Zeroing { .. } => ZEROING_SUFFIX,
+            PredicateForm::Merging => MERGING_SUFFIX,
+            PredicateForm::DontCare { .. } => DONT_CARE_SUFFIX,
+        }
+    }
+
+    pub fn make_zeroinitializer(ty: &TypeKind) -> Expression {
+        FnCall::new_expression(
+            format!("svdup_n_{}", ty.acle_notation_repr())
+                .parse()
+                .unwrap(),
+            vec![if ty.base_type().unwrap().is_float() {
+                Expression::FloatConstant(0.0)
+            } else {
+                Expression::IntConstant(0)
+            }],
+        )
+    }
+
+    pub fn make_zeroselector(pg_var: WildString, op_var: WildString, ty: &TypeKind) -> Expression {
+        FnCall::new_expression(
+            format!("svsel_{}", ty.acle_notation_repr())
+                .parse()
+                .unwrap(),
+            vec![
+                Expression::Identifier(pg_var, IdentifierType::Variable),
+                Expression::Identifier(op_var, IdentifierType::Variable),
+                Self::make_zeroinitializer(ty),
+            ],
+        )
+    }
+
+    pub fn post_build(&self, intrinsic: &mut Intrinsic) -> context::Result {
+        // Drop the argument
+        match self {
+            PredicateForm::Zeroing(ZeroingMethod::Drop { drop: drop_var }) => {
+                intrinsic.signature.drop_argument(drop_var)?
+            }
+            PredicateForm::DontCare(DontCareMethod::AsZeroing) => {
+                if let ZeroingMethod::Drop { drop } = intrinsic
+                    .input
+                    .predication_methods
+                    .zeroing_method
+                    .to_owned()
+                    .ok_or_else(|| {
+                        "DontCareMethod::AsZeroing without zeroing method.".to_string()
+                    })?
+                {
+                    intrinsic.signature.drop_argument(&drop)?
+                }
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+
+    fn infer_dont_care(mask: &PredicationMask, methods: &PredicationMethods) -> PredicateForm {
+        let method = if methods.dont_care_method == DontCareMethod::Inferred {
+            if mask.has_zeroing()
+                && matches!(methods.zeroing_method, Some(ZeroingMethod::Drop { .. }))
+            {
+                DontCareMethod::AsZeroing
+            } else {
+                DontCareMethod::AsMerging
+            }
+        } else {
+            methods.dont_care_method
+        };
+
+        PredicateForm::DontCare(method)
+    }
+
+    pub fn compile_list(
+        mask: &PredicationMask,
+        methods: &PredicationMethods,
+    ) -> context::Result<Vec<PredicateForm>> {
+        let mut forms = Vec::new();
+
+        if mask.has_merging() {
+            forms.push(PredicateForm::Merging)
+        }
+
+        if mask.has_dont_care() {
+            forms.push(Self::infer_dont_care(mask, methods))
+        }
+
+        if mask.has_zeroing() {
+            if let Some(method) = methods.zeroing_method.to_owned() {
+                forms.push(PredicateForm::Zeroing(method))
+            } else {
+                return Err(
+                    "cannot create a zeroing variant without a zeroing method specified!"
+                        .to_string(),
+                );
+            }
+        }
+
+        Ok(forms)
+    }
+}
+
+#[derive(
+    Debug, Clone, Copy, Default, PartialEq, Eq, Hash, DeserializeFromStr, SerializeDisplay,
+)]
+pub struct PredicationMask {
+    /// Merging
+    m: bool,
+    /// Don't care
+    x: bool,
+    /// Zeroing
+    z: bool,
+}
+
+impl PredicationMask {
+    pub fn has_merging(&self) -> bool {
+        self.m
+    }
+
+    pub fn has_dont_care(&self) -> bool {
+        self.x
+    }
+
+    pub fn has_zeroing(&self) -> bool {
+        self.z
+    }
+}
+
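+// For example, parsing "mz" into a `PredicationMask` enables the merging and zeroing forms,
+// "x" enables only the don't-care form, and an empty string (no forms) is rejected.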
+impl FromStr for PredicationMask {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut result = Self::default();
+        for kind in s.bytes() {
+            match kind {
+                b'm' => result.m = true,
+                b'x' => result.x = true,
+                b'z' => result.z = true,
+                _ => {
+                    return Err(format!(
+                        "unknown predicate form modifier: {}",
+                        char::from(kind)
+                    ));
+                }
+            }
+        }
+
+        if result.m || result.x || result.z {
+            Ok(result)
+        } else {
+            Err("invalid predication mask".to_string())
+        }
+    }
+}
+
+impl fmt::Display for PredicationMask {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        self.m.then(|| write!(f, "m")).transpose()?;
+        self.x.then(|| write!(f, "x")).transpose()?;
+        self.z.then(|| write!(f, "z")).transpose().map(|_| ())
+    }
+}
+
+impl TryFrom<&WildString> for PredicationMask {
+    type Error = String;
+
+    fn try_from(value: &WildString) -> Result<Self, Self::Error> {
+        value
+            .wildcards()
+            .find_map(|w| {
+                if let Wildcard::PredicateForms(mask) = w {
+                    Some(*mask)
+                } else {
+                    None
+                }
+            })
+            .ok_or_else(|| "no predicate forms were specified in the name".to_string())
+    }
+}
diff --git a/crates/stdarch-gen2/src/typekinds.rs b/crates/stdarch-gen2/src/typekinds.rs
new file mode 100644
index 0000000000..71f6297d94
--- /dev/null
+++ b/crates/stdarch-gen2/src/typekinds.rs
@@ -0,0 +1,1024 @@
+use lazy_static::lazy_static;
+use proc_macro2::TokenStream;
+use quote::{quote, ToTokens, TokenStreamExt};
+use regex::Regex;
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use std::fmt;
+use std::str::FromStr;
+
+use crate::context;
+use crate::expression::{Expression, FnCall};
+use crate::intrinsic::AccessLevel;
+use crate::wildcards::Wildcard;
+
+const VECTOR_FULL_REGISTER_SIZE: u32 = 128;
+const VECTOR_HALF_REGISTER_SIZE: u32 = VECTOR_FULL_REGISTER_SIZE / 2;
+
+#[derive(Debug, Clone, Copy)]
+pub enum TypeRepr {
+    C,
+    Rust,
+    LLVMMachine,
+    ACLENotation,
+    Size,
+    SizeLiteral,
+    TypeKind,
+    SizeInBytesLog2,
+}
+
+pub trait ToRepr {
+    fn repr(&self, repr: TypeRepr) -> String;
+
+    fn c_repr(&self) -> String {
+        self.repr(TypeRepr::C)
+    }
+
+    fn rust_repr(&self) -> String {
+        self.repr(TypeRepr::Rust)
+    }
+
+    fn llvm_machine_repr(&self) -> String {
+        self.repr(TypeRepr::LLVMMachine)
+    }
+
+    fn acle_notation_repr(&self) -> String {
+        self.repr(TypeRepr::ACLENotation)
+    }
+
+    fn size(&self) -> String {
+        self.repr(TypeRepr::Size)
+    }
+
+    fn size_literal(&self) -> String {
+        self.repr(TypeRepr::SizeLiteral)
+    }
+
+    fn type_kind(&self) -> String {
+        self.repr(TypeRepr::TypeKind)
+    }
+
+    fn size_in_bytes_log2(&self) -> String {
+        self.repr(TypeRepr::SizeInBytesLog2)
+    }
+}
+
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash)]
+pub struct TypeKindOptions {
+    f: bool,
+    s: bool,
+    u: bool,
+}
+
+impl TypeKindOptions {
+    pub fn contains(&self, kind: BaseTypeKind) -> bool {
+        match kind {
+            BaseTypeKind::Float => self.f,
+            BaseTypeKind::Int => self.s,
+            BaseTypeKind::UInt => self.u,
+            BaseTypeKind::Bool => false,
+        }
+    }
+}
+
+impl FromStr for TypeKindOptions {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        let mut result = Self::default();
+        for kind in s.bytes() {
+            match kind {
+                b'f' => result.f = true,
+                b's' => result.s = true,
+                b'u' => result.u = true,
+                _ => {
+                    return Err(format!("unknown type kind: {}", char::from(kind)));
+                }
+            }
+        }
+        Ok(result)
+    }
+}
+
+impl fmt::Display for TypeKindOptions {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        self.f.then(|| write!(f, "f")).transpose()?;
+        self.s.then(|| write!(f, "s")).transpose()?;
+        self.u.then(|| write!(f, "u")).transpose().map(|_| ())
+    }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum BaseTypeKind {
+    Float,
+    Int,
+    UInt,
+    Bool,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub enum BaseType {
+    Sized(BaseTypeKind, u32),
+    Unsized(BaseTypeKind),
+}
+
+#[derive(
+    Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, SerializeDisplay, DeserializeFromStr,
+)]
+pub enum VectorTupleSize {
+    Two,
+    Three,
+    Four,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub struct VectorType {
+    base_type: BaseType,
+    lanes: u32,
+    is_scalable: bool,
+    tuple_size: Option<VectorTupleSize>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, SerializeDisplay, DeserializeFromStr)]
+pub enum TypeKind {
+    Vector(VectorType),
+    Base(BaseType),
+    Pointer(Box<TypeKind>, AccessLevel),
+    Custom(String),
+    Wildcard(Wildcard),
+}
+
+impl TypeKind {
+    pub fn base_type(&self) -> Option<&BaseType> {
+        match self {
+            Self::Vector(t) => Some(t.base_type()),
+            Self::Pointer(t, _) => t.base_type(),
+            Self::Base(t) => Some(t),
+            Self::Wildcard(..) => None,
+            Self::Custom(..) => None,
+        }
+    }
+
+    pub fn base_type_mut(&mut self) -> Option<&mut BaseType> {
+        match self {
+            Self::Vector(t) => Some(t.base_type_mut()),
+            Self::Pointer(t, _) => t.base_type_mut(),
+            Self::Base(t) => Some(t),
+            Self::Wildcard(..) => None,
+            Self::Custom(..) => None,
+        }
+    }
+
+    pub fn populate_wildcard(&mut self, type_kind: TypeKind) -> context::Result {
+        match self {
+            Self::Wildcard(..) => *self = type_kind,
+            Self::Pointer(t, _) => t.populate_wildcard(type_kind)?,
+            _ => return Err("no wildcard available to populate".to_string()),
+        }
+        Ok(())
+    }
+
+    pub fn base(&self) -> Option<&BaseType> {
+        match self {
+            Self::Base(ty) => Some(ty),
+            Self::Pointer(tk, _) => tk.base(),
+            _ => None,
+        }
+    }
+
+    pub fn vector(&self) -> Option<&VectorType> {
+        match self {
+            Self::Vector(ty) => Some(ty),
+            _ => None,
+        }
+    }
+
+    pub fn vector_mut(&mut self) -> Option<&mut VectorType> {
+        match self {
+            Self::Vector(ty) => Some(ty),
+            _ => None,
+        }
+    }
+
+    pub fn wildcard(&self) -> Option<&Wildcard> {
+        match self {
+            Self::Wildcard(w) => Some(w),
+            Self::Pointer(w, _) => w.wildcard(),
+            _ => None,
+        }
+    }
+
+    pub fn make_predicate_from(ty: &TypeKind) -> context::Result<TypeKind> {
+        Ok(TypeKind::Vector(VectorType::make_predicate_from_bitsize(
+            ty.base_type()
+                .ok_or_else(|| format!("cannot infer predicate from type {ty}"))?
+                .get_size()
+                .map_err(|_| format!("cannot infer predicate from unsized type {ty}"))?,
+        )))
+    }
+
+    pub fn make_vector(
+        from: TypeKind,
+        is_scalable: bool,
+        tuple_size: Option<VectorTupleSize>,
+    ) -> context::Result<TypeKind> {
+        from.base().cloned().map_or_else(
+            || Err(format!("cannot make a vector type out of {from}!")),
+            |base| {
+                let vt = VectorType::make_from_base(base, is_scalable, tuple_size);
+                Ok(TypeKind::Vector(vt))
+            },
+        )
+    }
+
+    /// Return a new expression that converts the provided `expr` from type `other` to `self`.
+    ///
+    /// Conversions are bitwise over the whole value, like `transmute`, though `transmute`
+    /// itself is only used as a last resort.
+    ///
+    /// This can fail (returning `None`) due to incompatible types, and many conversions are simply
+    /// unimplemented.
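+    ///
+    /// For example, a same-sized unsigned-to-signed reinterpretation between plain scalable
+    /// vectors is expressed as an `as_signed()` method call, while float/integer conversions
+    /// (or lane-size changes) fall back to `simd_reinterpret`.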
+    pub fn express_reinterpretation_from(
+        &self,
+        other: &TypeKind,
+        expr: impl Into<Expression>,
+    ) -> Option<Expression> {
+        if self == other {
+            Some(expr.into())
+        } else if let (Some(self_vty), Some(other_vty)) = (self.vector(), other.vector()) {
+            if self_vty.is_scalable
+                && self_vty.tuple_size.is_none()
+                && other_vty.is_scalable
+                && other_vty.tuple_size.is_none()
+            {
+                // Plain scalable vectors.
+                use BaseTypeKind::*;
+                match (self_vty.base_type, other_vty.base_type) {
+                    (BaseType::Sized(Int, self_size), BaseType::Sized(UInt, other_size))
+                        if self_size == other_size =>
+                    {
+                        Some(Expression::MethodCall(
+                            Box::new(expr.into()),
+                            "as_signed".parse().unwrap(),
+                            vec![],
+                        ))
+                    }
+                    (BaseType::Sized(UInt, self_size), BaseType::Sized(Int, other_size))
+                        if self_size == other_size =>
+                    {
+                        Some(Expression::MethodCall(
+                            Box::new(expr.into()),
+                            "as_unsigned".parse().unwrap(),
+                            vec![],
+                        ))
+                    }
+                    (
+                        BaseType::Sized(Float | Int | UInt, _),
+                        BaseType::Sized(Float | Int | UInt, _),
+                    ) => Some(FnCall::new_expression(
+                        // Conversions between float and (u)int, or where the lane size changes.
+                        "simd_reinterpret".parse().unwrap(),
+                        vec![expr.into()],
+                    )),
+                    _ => None,
+                }
+            } else {
+                // Tuples and fixed-width vectors.
+                None
+            }
+        } else {
+            // Scalar types.
+            None
+        }
+    }
+}
+
+impl FromStr for TypeKind {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        Ok(match s {
+            s if s.starts_with('{') && s.ends_with('}') => {
+                Self::Wildcard(s[1..s.len() - 1].trim().parse()?)
+            }
+            s if s.starts_with('*') => {
+                let mut split = s[1..].split_whitespace();
+                let (ty, rw) = match (split.clone().count(), split.next(), split.next()) {
+                    (2, Some("mut"), Some(ty)) => (ty, AccessLevel::RW),
+                    (2, Some("const"), Some(ty)) => (ty, AccessLevel::R),
+                    (1, Some(ty), None) => (ty, AccessLevel::R),
+                    _ => return Err(format!("invalid pointer type {s:#?} given")),
+                };
+                Self::Pointer(Box::new(ty.parse()?), rw)
+            }
+            _ => s
+                .parse::<VectorType>()
+                .map(TypeKind::Vector)
+                .or_else(|_| s.parse::<BaseType>().map(TypeKind::Base))
+                .unwrap_or_else(|_| TypeKind::Custom(s.to_string())),
+        })
+    }
+}
+
+impl fmt::Display for TypeKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Vector(ty) => write!(f, "{ty}"),
+            Self::Pointer(ty, _) => write!(f, "{ty}"),
+            Self::Base(ty) => write!(f, "{ty}"),
+            Self::Wildcard(w) => write!(f, "{{{w}}}"),
+            Self::Custom(s) => write!(f, "{s}"),
+        }
+    }
+}
+
+impl ToRepr for TypeKind {
+    fn repr(&self, repr: TypeRepr) -> String {
+        match self {
+            Self::Vector(ty) => ty.repr(repr),
+            Self::Pointer(ty, _) => ty.repr(repr),
+            Self::Base(ty) => ty.repr(repr),
+            Self::Wildcard(w) => format!("{w}"),
+            Self::Custom(s) => s.to_string(),
+        }
+    }
+}
+
+impl ToTokens for TypeKind {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        if let Self::Pointer(_, rw) = self {
+            tokens.append_all(match rw {
+                AccessLevel::RW => quote! { *mut },
+                AccessLevel::R => quote! { *const },
+            })
+        }
+
+        tokens.append_all(
+            self.to_string()
+                .parse::<TokenStream>()
+                .expect("invalid syntax"),
+        )
+    }
+}
+
+impl PartialOrd for TypeKind {
+    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for TypeKind {
+    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
+        use std::cmp::Ordering::*;
+
+        impl From<&TypeKind> for usize {
+            fn from(ty: &TypeKind) -> Self {
+                match ty {
+                    TypeKind::Base(_) => 1,
+                    TypeKind::Pointer(_, _) => 2,
+                    TypeKind::Vector(_) => 3,
+                    TypeKind::Custom(_) => 4,
+                    TypeKind::Wildcard(_) => 5,
+                }
+            }
+        }
+
+        let self_int: usize = self.into();
+        let other_int: usize = other.into();
+
+        if self_int == other_int {
+            match (self, other) {
+                (TypeKind::Base(ty1), TypeKind::Base(ty2)) => ty1.cmp(ty2),
+                (TypeKind::Pointer(ty1, _), TypeKind::Pointer(ty2, _)) => ty1.cmp(ty2),
+                (TypeKind::Vector(vt1), TypeKind::Vector(vt2)) => vt1.cmp(vt2),
+                (TypeKind::Custom(s1), TypeKind::Custom(s2)) => s1.cmp(s2),
+                (TypeKind::Wildcard(..), TypeKind::Wildcard(..)) => Equal,
+                _ => unreachable!(),
+            }
+        } else {
+            self_int.cmp(&other_int)
+        }
+    }
+}
+
+impl VectorType {
+    pub fn base_type(&self) -> &BaseType {
+        &self.base_type
+    }
+
+    pub fn base_type_mut(&mut self) -> &mut BaseType {
+        &mut self.base_type
+    }
+
+    fn sanitise_lanes(
+        mut base_type: BaseType,
+        lanes: Option<u32>,
+    ) -> Result<(BaseType, u32), String> {
+        let lanes = match (base_type, lanes) {
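+            // For predicates, the numeric suffix parsed into the base type (e.g. the `4` in
+            // `svbool4_t`) is really the lane count; derive the per-lane bit width from it.
+            // A plain `svbool_t` defaults to 16 lanes of 8-bit elements.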
+            (BaseType::Sized(BaseTypeKind::Bool, lanes), None) => {
+                base_type = BaseType::Sized(BaseTypeKind::Bool, VECTOR_FULL_REGISTER_SIZE / lanes);
+                lanes
+            }
+            (BaseType::Unsized(BaseTypeKind::Bool), None) => {
+                base_type = BaseType::Sized(BaseTypeKind::Bool, 8);
+                16
+            }
+            (BaseType::Sized(_, size), None) => VECTOR_FULL_REGISTER_SIZE / size,
+            (BaseType::Sized(_, size), Some(lanes)) => match size * lanes {
+                VECTOR_FULL_REGISTER_SIZE | VECTOR_HALF_REGISTER_SIZE => lanes,
+                _ => return Err("invalid number of lanes".to_string()),
+            },
+            _ => return Err("cannot infer number of lanes".to_string()),
+        };
+
+        Ok((base_type, lanes))
+    }
+
+    pub fn make_from_base(
+        base_ty: BaseType,
+        is_scalable: bool,
+        tuple_size: Option<VectorTupleSize>,
+    ) -> VectorType {
+        if is_scalable {
+            if let BaseType::Sized(BaseTypeKind::Bool, size) = base_ty {
+                return Self::make_predicate_from_bitsize(size);
+            }
+        }
+
+        let (base_type, lanes) = Self::sanitise_lanes(base_ty, None).unwrap();
+
+        VectorType {
+            base_type,
+            lanes,
+            is_scalable,
+            tuple_size,
+        }
+    }
+
+    pub fn make_predicate_from_bitsize(size: u32) -> VectorType {
+        VectorType {
+            base_type: BaseType::Sized(BaseTypeKind::Bool, size),
+            lanes: (VECTOR_FULL_REGISTER_SIZE / size),
+            is_scalable: true,
+            tuple_size: None,
+        }
+    }
+
+    pub fn cast_base_type_as(&mut self, ty: BaseType) {
+        self.base_type = ty
+    }
+}
+
+impl FromStr for VectorType {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        lazy_static! {
+            static ref RE: Regex = Regex::new(r"^(?:(?:sv(?P<sv_ty>(?:uint|int|bool|float)(?:\d+)?))|(?:(?P<ty>(?:uint|int|bool|float)(?:\d+)?)x(?P<lanes>[0-9])))(?:x(?P<tuple_size>2|3|4))?_t$").unwrap();
+        }
+
+        if let Some(c) = RE.captures(s) {
+            let (base_type, lanes) = Self::sanitise_lanes(
+                c.name("sv_ty")
+                    .or_else(|| c.name("ty"))
+                    .map(<&str>::from)
+                    .map(BaseType::from_str)
+                    .unwrap()?,
+                c.name("lanes")
+                    .map(<&str>::from)
+                    .map(u32::from_str)
+                    .transpose()
+                    .unwrap(),
+            )
+            .map_err(|e| format!("invalid {s:#?} vector type: {e}"))?;
+
+            let tuple_size = c
+                .name("tuple_size")
+                .map(<&str>::from)
+                .map(VectorTupleSize::from_str)
+                .transpose()
+                .unwrap();
+
+            Ok(VectorType {
+                base_type,
+                is_scalable: c.name("sv_ty").is_some(),
+                lanes,
+                tuple_size,
+            })
+        } else {
+            Err(format!("invalid vector type {s:#?} given"))
+        }
+    }
+}
+
+impl ToRepr for VectorType {
+    fn repr(&self, repr: TypeRepr) -> String {
+        let make_llvm_repr = |show_unsigned| {
+            format!(
+                "{}v{}{}",
+                if self.is_scalable { "nx" } else { "" },
+                self.lanes * (self.tuple_size.map(usize::from).unwrap_or(1) as u32),
+                match self.base_type {
+                    BaseType::Sized(BaseTypeKind::UInt, size) if show_unsigned =>
+                        format!("u{size}"),
+                    _ => self.base_type.llvm_machine_repr(),
+                }
+            )
+        };
+
+        if matches!(repr, TypeRepr::ACLENotation) {
+            self.base_type.acle_notation_repr()
+        } else if matches!(repr, TypeRepr::LLVMMachine) {
+            make_llvm_repr(false)
+        } else if self.is_scalable {
+            match (self.base_type, self.lanes, self.tuple_size) {
+                (BaseType::Sized(BaseTypeKind::Bool, _), 16, _) => "svbool_t".to_string(),
+                (BaseType::Sized(BaseTypeKind::Bool, _), lanes, _) => format!("svbool{lanes}_t"),
+                (BaseType::Sized(_, size), lanes, _)
+                    if VECTOR_FULL_REGISTER_SIZE != (size * lanes) =>
+                {
+                    // Special internal type case
+                    make_llvm_repr(true)
+                }
+                (ty, _, None) => format!("sv{}_t", ty.c_repr()),
+                (ty, _, Some(tuple_size)) => format!("sv{}x{tuple_size}_t", ty.c_repr()),
+            }
+        } else {
+            match self.tuple_size {
+                Some(tuple_size) => format!(
+                    "{}x{}x{}_t",
+                    self.base_type.c_repr(),
+                    self.lanes,
+                    tuple_size
+                ),
+                None => format!("{}x{}_t", self.base_type.c_repr(), self.lanes),
+            }
+        }
+    }
+}
+
+impl fmt::Display for VectorType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.c_repr())
+    }
+}
+
+impl From<VectorTupleSize> for usize {
+    fn from(t: VectorTupleSize) -> Self {
+        match t {
+            VectorTupleSize::Two => 2,
+            VectorTupleSize::Three => 3,
+            VectorTupleSize::Four => 4,
+        }
+    }
+}
+
+impl FromStr for VectorTupleSize {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "2" => Ok(Self::Two),
+            "3" => Ok(Self::Three),
+            "4" => Ok(Self::Four),
+            _ => Err(format!("invalid vector tuple size `{s}` provided")),
+        }
+    }
+}
+
+impl TryFrom<usize> for VectorTupleSize {
+    type Error = String;
+
+    fn try_from(value: usize) -> Result<Self, Self::Error> {
+        match value {
+            2 => Ok(Self::Two),
+            3 => Ok(Self::Three),
+            4 => Ok(Self::Four),
+            _ => Err(format!("invalid vector tuple size `{value}` provided")),
+        }
+    }
+}
+
+impl fmt::Display for VectorTupleSize {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", usize::from(*self))
+    }
+}
+
+impl FromStr for BaseTypeKind {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "float" | "f" => Ok(Self::Float),
+            "int" | "i" => Ok(Self::Int),
+            "uint" | "u" => Ok(Self::UInt),
+            "bool" | "b" => Ok(Self::Bool),
+            _ => Err(format!("no match for {s}")),
+        }
+    }
+}
+
+impl ToRepr for BaseTypeKind {
+    fn repr(&self, repr: TypeRepr) -> String {
+        match (repr, self) {
+            (TypeRepr::C, Self::Float) => "float",
+            (TypeRepr::C, Self::Int) => "int",
+            (TypeRepr::C, Self::UInt) => "uint",
+            (TypeRepr::Rust | TypeRepr::LLVMMachine | TypeRepr::ACLENotation, Self::Float) => "f",
+            (TypeRepr::Rust, Self::Int) | (TypeRepr::LLVMMachine, Self::Int | Self::UInt) => "i",
+            (TypeRepr::Rust | TypeRepr::ACLENotation, Self::UInt) => "u",
+            (TypeRepr::ACLENotation, Self::Int) => "s",
+            (TypeRepr::ACLENotation, Self::Bool) => "b",
+            (_, Self::Bool) => "bool",
+            _ => {
+                unreachable!("no base type kind available for representation {repr:?}")
+            }
+        }
+        .to_string()
+    }
+}
+
+impl fmt::Display for BaseTypeKind {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.c_repr())
+    }
+}
+
+impl BaseType {
+    pub fn get_size(&self) -> Result<u32, String> {
+        match self {
+            Self::Sized(_, size) => Ok(*size),
+            _ => Err(format!("cannot get the size of unsized base type {self:#?}")),
+        }
+    }
+
+    pub fn kind(&self) -> &BaseTypeKind {
+        match self {
+            BaseType::Sized(kind, _) | BaseType::Unsized(kind) => kind,
+        }
+    }
+
+    pub fn is_bool(&self) -> bool {
+        self.kind() == &BaseTypeKind::Bool
+    }
+
+    pub fn is_float(&self) -> bool {
+        self.kind() == &BaseTypeKind::Float
+    }
+}
+
+impl FromStr for BaseType {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        lazy_static! {
+            static ref RE: Regex = Regex::new(r"^(?P<kind>[a-zA-Z]+)(?P<size>\d+)?(_t)?$").unwrap();
+        }
+
+        if let Some(c) = RE.captures(s) {
+            let kind = c["kind"].parse()?;
+            let size = c
+                .name("size")
+                .map(<&str>::from)
+                .map(u32::from_str)
+                .transpose()
+                .unwrap();
+
+            match size {
+                Some(size) => Ok(Self::Sized(kind, size)),
+                None => Ok(Self::Unsized(kind)),
+            }
+        } else {
+            Err(format!("failed to parse type `{s}`"))
+        }
+    }
+}
+
+impl ToRepr for BaseType {
+    fn repr(&self, repr: TypeRepr) -> String {
+        use BaseType::*;
+        use BaseTypeKind::*;
+        use TypeRepr::*;
+        match (self, &repr) {
+            (Sized(Bool, _) | Unsized(Bool), LLVMMachine) => "i1".to_string(),
+            (Sized(_, size), SizeLiteral) if *size == 8 => "b".to_string(),
+            (Sized(_, size), SizeLiteral) if *size == 16 => "h".to_string(),
+            (Sized(_, size), SizeLiteral) if *size == 32 => "w".to_string(),
+            (Sized(_, size), SizeLiteral) if *size == 64 => "d".to_string(),
+            (_, SizeLiteral) => unreachable!("cannot represent {self:#?} as size literal"),
+            (Sized(Float, _) | Unsized(Float), TypeKind) => "f".to_string(),
+            (Sized(Int, _) | Unsized(Int), TypeKind) => "s".to_string(),
+            (Sized(UInt, _) | Unsized(UInt), TypeKind) => "u".to_string(),
+            (Sized(_, size), Size) => size.to_string(),
+            (Sized(_, size), SizeInBytesLog2) => {
+                assert!(size.is_power_of_two() && *size >= 8);
+                (size >> 3).trailing_zeros().to_string()
+            }
+            (Sized(kind, size), _) => format!("{}{size}", kind.repr(repr)),
+            (Unsized(kind), _) => kind.repr(repr),
+        }
+    }
+}
+
+impl fmt::Display for BaseType {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{}", self.rust_repr())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::typekinds::*;
+
+    #[test]
+    fn test_predicate() {
+        assert_eq!(
+            "svbool_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::Bool, 8),
+                is_scalable: true,
+                lanes: 16,
+                tuple_size: None
+            })
+        );
+    }
+
+    #[test]
+    fn test_llvm_internal_predicate() {
+        assert_eq!(
+            "svbool4_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::Bool, 32),
+                is_scalable: true,
+                lanes: 4,
+                tuple_size: None
+            })
+        );
+    }
+
+    #[test]
+    fn test_llvm_internal_predicate_llvm() {
+        assert_eq!(
+            "svbool4_t".parse::<TypeKind>().unwrap().llvm_machine_repr(),
+            "nxv4i1"
+        );
+    }
+
+    #[test]
+    fn test_llvm_internal_predicate_acle() {
+        assert_eq!(
+            "svbool4_t"
+                .parse::<TypeKind>()
+                .unwrap()
+                .acle_notation_repr(),
+            "b32"
+        );
+    }
+
+    #[test]
+    fn test_predicate_from_bitsize() {
+        let pg = VectorType::make_predicate_from_bitsize(32);
+        assert_eq!(pg.acle_notation_repr(), "b32");
+        assert_eq!(pg, "svbool4_t".parse().unwrap());
+        assert_eq!(pg.lanes, 4);
+        assert_eq!(pg.base_type, BaseType::Sized(BaseTypeKind::Bool, 32));
+    }
+
+    #[test]
+    fn test_scalable_single() {
+        assert_eq!(
+            "svuint8_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 8),
+                is_scalable: true,
+                lanes: 16,
+                tuple_size: None
+            })
+        );
+    }
+
+    #[test]
+    fn test_scalable_tuple() {
+        assert_eq!(
+            "svint64x3_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::Int, 64),
+                is_scalable: true,
+                lanes: 2,
+                tuple_size: Some(VectorTupleSize::Three),
+            })
+        );
+    }
+
+    #[test]
+    fn test_scalable_single_llvm() {
+        assert_eq!(
+            "svuint32_t"
+                .parse::<TypeKind>()
+                .unwrap()
+                .llvm_machine_repr(),
+            "nxv4i32"
+        );
+    }
+
+    #[test]
+    fn test_scalable_tuple_llvm() {
+        assert_eq!(
+            "svint32x4_t"
+                .parse::<TypeKind>()
+                .unwrap()
+                .llvm_machine_repr(),
+            "nxv16i32"
+        );
+    }
+
+    #[test]
+    fn test_vector_single_full() {
+        assert_eq!(
+            "uint32x4_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 32),
+                is_scalable: false,
+                lanes: 4,
+                tuple_size: None,
+            })
+        );
+    }
+
+    #[test]
+    fn test_vector_single_half() {
+        assert_eq!(
+            "uint32x2_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 32),
+                is_scalable: false,
+                lanes: 2,
+                tuple_size: None,
+            })
+        );
+    }
+
+    #[test]
+    fn test_vector_tuple() {
+        assert_eq!(
+            "uint64x2x4_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 64),
+                is_scalable: false,
+                lanes: 2,
+                tuple_size: Some(VectorTupleSize::Four),
+            })
+        );
+    }
+
+    #[test]
+    fn test_const_pointer() {
+        let p = "*u32".parse::<TypeKind>().unwrap();
+        assert_eq!(
+            p,
+            TypeKind::Pointer(
+                Box::new(TypeKind::Base(BaseType::Sized(BaseTypeKind::UInt, 32))),
+                AccessLevel::R
+            )
+        );
+        assert_eq!(p.to_token_stream().to_string(), "* const u32")
+    }
+
+    #[test]
+    fn test_mut_pointer() {
+        let p = "*mut u32".parse::<TypeKind>().unwrap();
+        assert_eq!(
+            p,
+            TypeKind::Pointer(
+                Box::new(TypeKind::Base(BaseType::Sized(BaseTypeKind::UInt, 32))),
+                AccessLevel::RW
+            )
+        );
+        assert_eq!(p.to_token_stream().to_string(), "* mut u32")
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_invalid_vector_single() {
+        assert_eq!(
+            "uint32x8_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 32),
+                is_scalable: false,
+                lanes: 8,
+                tuple_size: None,
+            })
+        );
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_invalid_vector_tuple() {
+        assert_eq!(
+            "uint32x4x5_t".parse::<TypeKind>().unwrap(),
+            TypeKind::Vector(VectorType {
+                base_type: BaseType::Sized(BaseTypeKind::UInt, 32),
+                is_scalable: false,
+                lanes: 8,
+                tuple_size: None, // cannot represent
+            })
+        );
+    }
+
+    #[test]
+    fn test_base() {
+        assert_eq!(
+            "u32".parse::<TypeKind>().unwrap(),
+            TypeKind::Base(BaseType::Sized(BaseTypeKind::UInt, 32)),
+        )
+    }
+
+    #[test]
+    fn test_custom() {
+        assert_eq!(
+            "svpattern".parse::<TypeKind>().unwrap(),
+            TypeKind::Custom("svpattern".to_string()),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_type() {
+        assert_eq!(
+            "{type}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::Type(None)),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_typeset() {
+        assert_eq!(
+            "{type[0]}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::Type(Some(0))),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_sve_type() {
+        assert_eq!(
+            "{sve_type}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::SVEType(None, None)),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_sve_typeset() {
+        assert_eq!(
+            "{sve_type[0]}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::SVEType(Some(0), None)),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_sve_tuple_type() {
+        assert_eq!(
+            "{sve_type_x2}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::SVEType(None, Some(VectorTupleSize::Two))),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_sve_tuple_typeset() {
+        assert_eq!(
+            "{sve_type_x2[0]}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::SVEType(Some(0), Some(VectorTupleSize::Two))),
+        )
+    }
+
+    #[test]
+    fn test_wildcard_predicate() {
+        assert_eq!(
+            "{predicate}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::Predicate(None))
+        )
+    }
+
+    #[test]
+    fn test_wildcard_scale() {
+        assert_eq!(
+            "{sve_type as i8}".parse::<TypeKind>().unwrap(),
+            TypeKind::Wildcard(Wildcard::Scale(
+                Box::new(Wildcard::SVEType(None, None)),
+                Box::new(TypeKind::Base(BaseType::Sized(BaseTypeKind::Int, 8)))
+            ))
+        )
+    }
+
+    #[test]
+    fn test_size_in_bytes_log2() {
+        assert_eq!("i8".parse::<TypeKind>().unwrap().size_in_bytes_log2(), "0");
+        assert_eq!("i16".parse::<TypeKind>().unwrap().size_in_bytes_log2(), "1");
+        assert_eq!("i32".parse::<TypeKind>().unwrap().size_in_bytes_log2(), "2");
+        assert_eq!("i64".parse::<TypeKind>().unwrap().size_in_bytes_log2(), "3")
+    }
+
+    #[test]
+    #[should_panic]
+    fn test_invalid_size_in_bytes_log2() {
+        "i9".parse::<TypeKind>().unwrap().size_in_bytes_log2();
+    }
+}
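+
+#[cfg(test)]
+mod type_kind_options_tests {
+    // Minimal illustrative check of the `.f`/`.s`/`.u` modifier parsing used by the
+    // `type_kind` wildcard.
+    use crate::typekinds::*;
+
+    #[test]
+    fn parse_and_query() {
+        let opts: TypeKindOptions = "fs".parse().unwrap();
+        assert!(opts.contains(BaseTypeKind::Float));
+        assert!(opts.contains(BaseTypeKind::Int));
+        assert!(!opts.contains(BaseTypeKind::UInt));
+        assert!(!opts.contains(BaseTypeKind::Bool));
+        assert!("q".parse::<TypeKindOptions>().is_err());
+    }
+}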
diff --git a/crates/stdarch-gen2/src/wildcards.rs b/crates/stdarch-gen2/src/wildcards.rs
new file mode 100644
index 0000000000..9d6194d517
--- /dev/null
+++ b/crates/stdarch-gen2/src/wildcards.rs
@@ -0,0 +1,179 @@
+use lazy_static::lazy_static;
+use regex::Regex;
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use std::fmt;
+use std::str::FromStr;
+
+use crate::{
+    predicate_forms::PredicationMask,
+    typekinds::{ToRepr, TypeKind, TypeKindOptions, VectorTupleSize},
+};
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash, SerializeDisplay, DeserializeFromStr)]
+pub enum Wildcard {
+    Type(Option<usize>),
+    /// NEON type derived from a base type
+    NEONType(Option<usize>, Option<VectorTupleSize>),
+    /// SVE type derived from a base type
+    SVEType(Option<usize>, Option<VectorTupleSize>),
+    /// Integer representation of bitsize
+    Size(Option<usize>),
+    /// Integer representation of bitsize minus one
+    SizeMinusOne(Option<usize>),
+    /// Literal representation of the bitsize: b(yte), h(alf), w(ord) or d(ouble)
+    SizeLiteral(Option<usize>),
+    /// Literal representation of the type kind: f(loat), s(igned), u(nsigned)
+    TypeKind(Option<usize>, Option<TypeKindOptions>),
+    /// Log2 of the size in bytes
+    SizeInBytesLog2(Option<usize>),
+    /// Predicate to be inferred from the specified type
+    Predicate(Option<usize>),
+    /// Predicate to be inferred from the greatest type
+    MaxPredicate,
+
+    Scale(Box<Wildcard>, Box<TypeKind>),
+
+    // Other wildcards
+    LLVMLink,
+    NVariant,
+    /// Predicate forms to generate; also serves as the placeholder for the predicate-form modifier in function names
+    PredicateForms(PredicationMask),
+
+    /// User-set wildcard through `substitutions`
+    Custom(String),
+}
+
+impl Wildcard {
+    pub fn is_nonpredicate_type(&self) -> bool {
+        matches!(
+            self,
+            Wildcard::Type(..) | Wildcard::NEONType(..) | Wildcard::SVEType(..)
+        )
+    }
+
+    pub fn get_typeset_index(&self) -> Option<usize> {
+        match self {
+            Wildcard::Type(idx) | Wildcard::NEONType(idx, ..) | Wildcard::SVEType(idx, ..) => {
+                Some(idx.unwrap_or(0))
+            }
+            _ => None,
+        }
+    }
+}
+
+impl FromStr for Wildcard {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        lazy_static! {
+            static ref RE: Regex = Regex::new(r"^(?P<wildcard>\w+?)(?:_x(?P<tuple_size>[2-4]))?(?:\[(?P<index>\d+)\])?(?:\.(?P<modifiers>\w+))?(?:\s+as\s+(?P<scale_to>.*?))?$").unwrap();
+        }
+
+        if let Some(c) = RE.captures(s) {
+            let wildcard_name = &c["wildcard"];
+            let inputset_index = c
+                .name("index")
+                .map(<&str>::from)
+                .map(usize::from_str)
+                .transpose()
+                .map_err(|_| format!("{:#?} is not a valid type index", &c["index"]))?;
+            let tuple_size = c
+                .name("tuple_size")
+                .map(<&str>::from)
+                .map(VectorTupleSize::from_str)
+                .transpose()
+                .map_err(|_| format!("{:#?} is not a valid tuple size", &c["tuple_size"]))?;
+            let modifiers = c.name("modifiers").map(<&str>::from);
+
+            let wildcard = match (wildcard_name, inputset_index, tuple_size, modifiers) {
+                ("type", index, None, None) => Ok(Wildcard::Type(index)),
+                ("neon_type", index, tuple, None) => Ok(Wildcard::NEONType(index, tuple)),
+                ("sve_type", index, tuple, None) => Ok(Wildcard::SVEType(index, tuple)),
+                ("size", index, None, None) => Ok(Wildcard::Size(index)),
+                ("size_minus_one", index, None, None) => Ok(Wildcard::SizeMinusOne(index)),
+                ("size_literal", index, None, None) => Ok(Wildcard::SizeLiteral(index)),
+                ("type_kind", index, None, modifiers) => Ok(Wildcard::TypeKind(
+                    index,
+                    modifiers.map(|modifiers| modifiers.parse()).transpose()?,
+                )),
+                ("size_in_bytes_log2", index, None, None) => Ok(Wildcard::SizeInBytesLog2(index)),
+                ("predicate", index, None, None) => Ok(Wildcard::Predicate(index)),
+                ("max_predicate", None, None, None) => Ok(Wildcard::MaxPredicate),
+                ("llvm_link", None, None, None) => Ok(Wildcard::LLVMLink),
+                ("_n", None, None, None) => Ok(Wildcard::NVariant),
+                (w, None, None, None) if w.starts_with('_') => {
+                    // test for predicate forms
+                    let pf_mask = PredicationMask::from_str(&w[1..]);
+                    if let Ok(mask) = pf_mask {
+                        if mask.has_merging() {
+                            Ok(Wildcard::PredicateForms(mask))
+                        } else {
+                            Err("cannot add predication without a Merging form".to_string())
+                        }
+                    } else {
+                        Err(format!("invalid wildcard `{s:#?}`"))
+                    }
+                }
+                (cw, None, None, None) => Ok(Wildcard::Custom(cw.to_string())),
+                _ => Err(format!("invalid wildcard `{s:#?}`")),
+            }?;
+
+            let scale_to = c
+                .name("scale_to")
+                .map(<&str>::from)
+                .map(TypeKind::from_str)
+                .transpose()
+                .map_err(|_| format!("{:#?} is not a valid type", &c["scale_to"]))?;
+
+            if let Some(scale_to) = scale_to {
+                Ok(Wildcard::Scale(Box::new(wildcard), Box::new(scale_to)))
+            } else {
+                Ok(wildcard)
+            }
+        } else {
+            Err(format!("invalid wildcard `{s:#?}`"))
+        }
+    }
+}
+
+impl fmt::Display for Wildcard {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            Self::Type(None) => write!(f, "type"),
+            Self::Type(Some(index)) => write!(f, "type[{index}]"),
+            Self::NEONType(None, None) => write!(f, "neon_type"),
+            Self::NEONType(Some(index), None) => write!(f, "neon_type[{index}]"),
+            Self::NEONType(None, Some(tuple_size)) => write!(f, "neon_type_x{tuple_size}"),
+            Self::NEONType(Some(index), Some(tuple_size)) => {
+                write!(f, "neon_type_x{tuple_size}[{index}]")
+            }
+            Self::SVEType(None, None) => write!(f, "sve_type"),
+            Self::SVEType(Some(index), None) => write!(f, "sve_type[{index}]"),
+            Self::SVEType(None, Some(tuple_size)) => write!(f, "sve_type_x{tuple_size}"),
+            Self::SVEType(Some(index), Some(tuple_size)) => {
+                write!(f, "sve_type_x{tuple_size}[{index}]")
+            }
+            Self::Size(None) => write!(f, "size"),
+            Self::Size(Some(index)) => write!(f, "size[{index}]"),
+            Self::SizeMinusOne(None) => write!(f, "size_minus_one"),
+            Self::SizeMinusOne(Some(index)) => write!(f, "size_minus_one[{index}]"),
+            Self::SizeLiteral(None) => write!(f, "size_literal"),
+            Self::SizeLiteral(Some(index)) => write!(f, "size_literal[{index}]"),
+            Self::TypeKind(None, None) => write!(f, "type_kind"),
+            Self::TypeKind(None, Some(opts)) => write!(f, "type_kind.{opts}"),
+            Self::TypeKind(Some(index), None) => write!(f, "type_kind[{index}]"),
+            Self::TypeKind(Some(index), Some(opts)) => write!(f, "type_kind[{index}].{opts}"),
+            Self::SizeInBytesLog2(None) => write!(f, "size_in_bytes_log2"),
+            Self::SizeInBytesLog2(Some(index)) => write!(f, "size_in_bytes_log2[{index}]"),
+            Self::Predicate(None) => write!(f, "predicate"),
+            Self::Predicate(Some(index)) => write!(f, "predicate[{index}]"),
+            Self::MaxPredicate => write!(f, "max_predicate"),
+            Self::LLVMLink => write!(f, "llvm_link"),
+            Self::NVariant => write!(f, "_n"),
+            Self::PredicateForms(mask) => write!(f, "_{mask}"),
+
+            Self::Scale(wildcard, ty) => write!(f, "{wildcard} as {}", ty.rust_repr()),
+            Self::Custom(cw) => write!(f, "{cw}"),
+        }
+    }
+}
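+
+#[cfg(test)]
+mod tests {
+    // Minimal illustrative round-trip checks: for a few representative wildcard spellings,
+    // `FromStr` followed by `Display` reproduces the original string.
+    use super::*;
+
+    #[test]
+    fn parse_display_roundtrip() {
+        for s in ["type", "sve_type_x2[0]", "size_literal[1]", "predicate", "_mxz"] {
+            let w: Wildcard = s.parse().unwrap();
+            assert_eq!(w.to_string(), s);
+        }
+    }
+}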
diff --git a/crates/stdarch-gen2/src/wildstring.rs b/crates/stdarch-gen2/src/wildstring.rs
new file mode 100644
index 0000000000..1f9e6c9ada
--- /dev/null
+++ b/crates/stdarch-gen2/src/wildstring.rs
@@ -0,0 +1,353 @@
+use itertools::Itertools;
+use proc_macro2::TokenStream;
+use quote::{quote, ToTokens, TokenStreamExt};
+use serde_with::{DeserializeFromStr, SerializeDisplay};
+use std::str::pattern::Pattern;
+use std::{fmt, str::FromStr};
+
+use crate::context::LocalContext;
+use crate::typekinds::{ToRepr, TypeRepr};
+use crate::wildcards::Wildcard;
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub enum WildStringPart {
+    String(String),
+    Wildcard(Wildcard),
+}
+
+/// Wildcard-able string
+#[derive(Debug, Clone, PartialEq, Eq, Default, SerializeDisplay, DeserializeFromStr)]
+pub struct WildString(Vec<WildStringPart>);
+
+impl WildString {
+    pub fn has_wildcards(&self) -> bool {
+        for part in self.0.iter() {
+            if let WildStringPart::Wildcard(..) = part {
+                return true;
+            }
+        }
+
+        false
+    }
+
+    pub fn wildcards(&self) -> impl Iterator<Item = &Wildcard> + '_ {
+        self.0.iter().filter_map(|part| match part {
+            WildStringPart::Wildcard(w) => Some(w),
+            _ => None,
+        })
+    }
+
+    pub fn iter(&self) -> impl Iterator<Item = &WildStringPart> + '_ {
+        self.0.iter()
+    }
+
+    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut WildStringPart> + '_ {
+        self.0.iter_mut()
+    }
+
+    pub fn starts_with(&self, s2: &str) -> bool {
+        self.to_string().starts_with(s2)
+    }
+
+    pub fn prepend_str(&mut self, s: impl Into<String>) {
+        self.0.insert(0, WildStringPart::String(s.into()))
+    }
+
+    pub fn push_str(&mut self, s: impl Into<String>) {
+        self.0.push(WildStringPart::String(s.into()))
+    }
+
+    pub fn push_wildcard(&mut self, w: Wildcard) {
+        self.0.push(WildStringPart::Wildcard(w))
+    }
+
+    pub fn is_empty(&self) -> bool {
+        self.0.is_empty()
+    }
+
+    pub fn replace<'a, P>(&'a self, from: P, to: &str) -> WildString
+    where
+        P: Pattern<'a> + Copy,
+    {
+        WildString(
+            self.0
+                .iter()
+                .map(|part| match part {
+                    WildStringPart::String(s) => WildStringPart::String(s.replace(from, to)),
+                    part => part.clone(),
+                })
+                .collect_vec(),
+        )
+    }
+
+    pub fn build_acle(&mut self, ctx: &LocalContext) -> Result<(), String> {
+        self.build(ctx, TypeRepr::ACLENotation)
+    }
+
+    pub fn build(&mut self, ctx: &LocalContext, repr: TypeRepr) -> Result<(), String> {
+        self.iter_mut().try_for_each(|wp| -> Result<(), String> {
+            if let WildStringPart::Wildcard(w) = wp {
+                let value = ctx
+                    .provide_substitution_wildcard(w)
+                    .or_else(|_| ctx.provide_type_wildcard(w).map(|ty| ty.repr(repr)))?;
+                *wp = WildStringPart::String(value);
+            }
+            Ok(())
+        })
+    }
+}
+
+impl From<String> for WildString {
+    fn from(s: String) -> Self {
+        WildString(vec![WildStringPart::String(s)])
+    }
+}
+
+impl FromStr for WildString {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        enum State {
+            Normal { start: usize },
+            Wildcard { start: usize, count: usize },
+            EscapeTokenOpen { start: usize, at: usize },
+            EscapeTokenClose { start: usize, at: usize },
+        }
+
+        let mut ws = WildString::default();
+        match s
+            .char_indices()
+            .try_fold(State::Normal { start: 0 }, |state, (idx, ch)| {
+                match (state, ch) {
+                    (State::Normal { start }, '{') => Ok(State::EscapeTokenOpen { start, at: idx }),
+                    (State::Normal { start }, '}') => {
+                        Ok(State::EscapeTokenClose { start, at: idx })
+                    }
+                    (State::EscapeTokenOpen { start, at }, '{')
+                    | (State::EscapeTokenClose { start, at }, '}') => {
+                        if start < at {
+                            ws.push_str(&s[start..at])
+                        }
+
+                        Ok(State::Normal { start: idx })
+                    }
+                    (State::EscapeTokenOpen { at, .. }, '}') => Err(format!(
+                        "empty wildcard given in string {s:?} at position {at}"
+                    )),
+                    (State::EscapeTokenOpen { start, at }, _) => {
+                        if start < at {
+                            ws.push_str(&s[start..at])
+                        }
+
+                        Ok(State::Wildcard {
+                            start: idx,
+                            count: 0,
+                        })
+                    }
+                    (State::EscapeTokenClose { at, .. }, _) => Err(format!(
+                        "closing a non-wildcard/bad escape in string {s:?} at position {at}"
+                    )),
+                    // Nesting wildcards is only supported for `{foo as {bar}}`; wildcards cannot
+                    // be nested at the start of a WildString.
+                    (State::Wildcard { start, count }, '{') => Ok(State::Wildcard {
+                        start,
+                        count: count + 1,
+                    }),
+                    (State::Wildcard { start, count: 0 }, '}') => {
+                        ws.push_wildcard(s[start..idx].parse()?);
+                        Ok(State::Normal { start: idx + 1 })
+                    }
+                    (State::Wildcard { start, count }, '}') => Ok(State::Wildcard {
+                        start,
+                        count: count - 1,
+                    }),
+                    (state @ State::Normal { .. }, _) | (state @ State::Wildcard { .. }, _) => {
+                        Ok(state)
+                    }
+                }
+            })? {
+            State::Normal { start } => {
+                if start < s.len() {
+                    ws.push_str(&s[start..]);
+                }
+
+                Ok(ws)
+            }
+            State::EscapeTokenOpen { at, .. } | State::Wildcard { start: at, .. } => Err(format!(
+                "unclosed wildcard in string {s:?} at position {at}"
+            )),
+            State::EscapeTokenClose { at, .. } => Err(format!(
+                "closing a non-wildcard/bad escape in string {s:?} at position {at}"
+            )),
+        }
+    }
+}
+
+impl fmt::Display for WildString {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "{}",
+            self.0
+                .iter()
+                .map(|part| match part {
+                    WildStringPart::String(s) => s.to_owned(),
+                    WildStringPart::Wildcard(w) => format!("{{{w}}}"),
+                })
+                .join("")
+        )
+    }
+}
+
+impl ToTokens for WildString {
+    fn to_tokens(&self, tokens: &mut TokenStream) {
+        assert!(
+            !self.has_wildcards(),
+            "cannot convert string with wildcards {self:?} to TokenStream"
+        );
+        let str = self.to_string();
+        tokens.append_all(quote! { #str })
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use crate::typekinds::*;
+    use crate::wildstring::*;
+
+    #[test]
+    fn test_empty_string() {
+        let ws: WildString = "".parse().unwrap();
+        assert_eq!(ws.0.len(), 0);
+    }
+
+    #[test]
+    fn test_plain_string() {
+        let ws: WildString = "plain string".parse().unwrap();
+        assert_eq!(ws.0.len(), 1);
+        assert_eq!(
+            ws,
+            WildString(vec![WildStringPart::String("plain string".to_string())])
+        )
+    }
+
+    #[test]
+    fn test_escaped_curly_brackets() {
+        let ws: WildString = "VALUE = {{value}}".parse().unwrap();
+        assert_eq!(ws.to_string(), "VALUE = {value}");
+        assert!(!ws.has_wildcards());
+    }
+
+    #[test]
+    fn test_escaped_curly_brackets_wildcard() {
+        let ws: WildString = "TYPE = {{{type}}}".parse().unwrap();
+        assert_eq!(ws.to_string(), "TYPE = {{type}}");
+        assert_eq!(ws.0.len(), 4);
+        assert!(ws.has_wildcards());
+    }
+
+    #[test]
+    fn test_wildcard_right_boundary() {
+        let s = "string test {type}";
+        let ws: WildString = s.parse().unwrap();
+        assert_eq!(&ws.to_string(), s);
+        assert!(ws.has_wildcards());
+    }
+
+    #[test]
+    fn test_wildcard_left_boundary() {
+        let s = "{type} string test";
+        let ws: WildString = s.parse().unwrap();
+        assert_eq!(&ws.to_string(), s);
+        assert!(ws.has_wildcards());
+    }
+
+    #[test]
+    fn test_recursive_wildcard() {
+        let s = "string test {type[0] as {type[1]}}";
+        let ws: WildString = s.parse().unwrap();
+
+        assert_eq!(ws.0.len(), 2);
+        assert_eq!(
+            ws,
+            WildString(vec![
+                WildStringPart::String("string test ".to_string()),
+                WildStringPart::Wildcard(Wildcard::Scale(
+                    Box::new(Wildcard::Type(Some(0))),
+                    Box::new(TypeKind::Wildcard(Wildcard::Type(Some(1)))),
+                ))
+            ])
+        );
+    }
+
+    #[test]
+    fn test_scale_wildcard() {
+        let s = "string {type[0] as i8} test";
+        let ws: WildString = s.parse().unwrap();
+
+        assert_eq!(ws.0.len(), 3);
+        assert_eq!(
+            ws,
+            WildString(vec![
+                WildStringPart::String("string ".to_string()),
+                WildStringPart::Wildcard(Wildcard::Scale(
+                    Box::new(Wildcard::Type(Some(0))),
+                    Box::new(TypeKind::Base(BaseType::Sized(BaseTypeKind::Int, 8))),
+                )),
+                WildStringPart::String(" test".to_string())
+            ])
+        );
+    }
+
+    #[test]
+    fn test_solitaire_wildcard() {
+        let ws: WildString = "{type}".parse().unwrap();
+        assert_eq!(ws.0.len(), 1);
+        assert_eq!(
+            ws,
+            WildString(vec![WildStringPart::Wildcard(Wildcard::Type(None))])
+        )
+    }
+
+    #[test]
+    fn test_empty_wildcard() {
+        "string {}"
+            .parse::<WildString>()
+            .expect_err("expected parse error");
+    }
+
+    #[test]
+    fn test_invalid_open_wildcard_right() {
+        "string {"
+            .parse::<WildString>()
+            .expect_err("expected parse error");
+    }
+
+    #[test]
+    fn test_invalid_close_wildcard_right() {
+        "string }"
+            .parse::<WildString>()
+            .expect_err("expected parse error");
+    }
+
+    #[test]
+    fn test_invalid_open_wildcard_left() {
+        "{string"
+            .parse::<WildString>()
+            .expect_err("expected parse error");
+    }
+
+    #[test]
+    fn test_invalid_close_wildcard_left() {
+        "}string"
+            .parse::<WildString>()
+            .expect_err("expected parse error");
+    }
+
+    #[test]
+    fn test_consecutive_wildcards() {
+        let s = "svprf{size_literal[1]}_gather_{type[0]}{index_or_offset}";
+        let ws: WildString = s.parse().unwrap();
+        assert_eq!(ws.to_string(), s)
+    }
+}

From cf8b3515e2ce399cf7f3143cf543e1b59ba9ff05 Mon Sep 17 00:00:00 2001
From: Jamie Cunliffe <Jamie.Cunliffe@arm.com>
Date: Fri, 4 Aug 2023 17:05:05 +0100
Subject: [PATCH 3/6] Type definitions and intrinsics for SVE

Co-authored-by: Luca Vizzarro <Luca.Vizzarro@arm.com>
Co-authored-by: Adam Gemmell <adam.gemmell@arm.com>
Co-authored-by: Jacob Bramley <jacob.bramley@arm.com>
---
 crates/assert-instr-macro/Cargo.toml          |     2 +-
 crates/assert-instr-macro/src/lib.rs          |    65 +-
 crates/core_arch/src/aarch64/mod.rs           |     3 +
 .../src/aarch64/sve/ld_st_tests_sve.rs        |  9345 ++++
 .../src/aarch64/sve/ld_st_tests_sve2.rs       |  2482 +
 crates/core_arch/src/aarch64/sve/mod.rs       |    33 +
 crates/core_arch/src/aarch64/sve/sve.rs       | 46098 ++++++++++++++++
 crates/core_arch/src/aarch64/sve/sve2.rs      | 24008 ++++++++
 crates/core_arch/src/aarch64/sve/types.rs     |   285 +
 crates/core_arch/src/lib.rs                   |    15 +-
 crates/core_arch/src/macros.rs                |    16 +
 crates/core_arch/src/simd_llvm.rs             |     5 +-
 12 files changed, 82344 insertions(+), 13 deletions(-)
 create mode 100644 crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
 create mode 100644 crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
 create mode 100644 crates/core_arch/src/aarch64/sve/mod.rs
 create mode 100644 crates/core_arch/src/aarch64/sve/sve.rs
 create mode 100644 crates/core_arch/src/aarch64/sve/sve2.rs
 create mode 100644 crates/core_arch/src/aarch64/sve/types.rs

diff --git a/crates/assert-instr-macro/Cargo.toml b/crates/assert-instr-macro/Cargo.toml
index 881c8109c1..a7440cd57e 100644
--- a/crates/assert-instr-macro/Cargo.toml
+++ b/crates/assert-instr-macro/Cargo.toml
@@ -11,4 +11,4 @@ test = false
 [dependencies]
 proc-macro2 = "1.0"
 quote = "1.0"
-syn = { version = "2.0", features = ["full"] }
+syn = { version = "2.0", features = ["full", "extra-traits"] }
diff --git a/crates/assert-instr-macro/src/lib.rs b/crates/assert-instr-macro/src/lib.rs
index 4821a31617..72e6c7fdc3 100644
--- a/crates/assert-instr-macro/src/lib.rs
+++ b/crates/assert-instr-macro/src/lib.rs
@@ -14,6 +14,7 @@ extern crate quote;
 
 use proc_macro2::TokenStream;
 use quote::ToTokens;
+use syn::spanned::Spanned;
 
 #[proc_macro_attribute]
 pub fn assert_instr(
@@ -79,17 +80,17 @@ pub fn assert_instr(
     let ret = &func.sig.output;
     for arg in func.sig.inputs.iter() {
         let capture = match *arg {
-            syn::FnArg::Typed(ref c) => c,
+            syn::FnArg::Typed(ref c) => c.to_owned(),
             ref v => panic!(
                 "arguments must not have patterns: `{:?}`",
                 v.clone().into_token_stream()
             ),
         };
-        let ident = match *capture.pat {
-            syn::Pat::Ident(ref i) => &i.ident,
+        let ident = match capture.pat.as_ref() {
+            syn::Pat::Ident(i) => i.ident.to_owned(),
             _ => panic!("must have bare arguments"),
         };
-        if let Some((_, tokens)) = invoc.args.iter().find(|a| *ident == a.0) {
+        if let Some(&(_, ref tokens)) = invoc.args.iter().find(|a| ident == a.0) {
             input_vals.push(quote! { #tokens });
         } else {
             inputs.push(capture);
@@ -97,15 +98,45 @@ pub fn assert_instr(
         }
     }
     for arg in func.sig.generics.params.iter() {
-        let c = match *arg {
-            syn::GenericParam::Const(ref c) => c,
+        let (ident, is_ty) = match *arg {
+            syn::GenericParam::Const(ref c) => (&c.ident, false),
+            syn::GenericParam::Type(ref t) => (&t.ident, true),
             ref v => panic!(
                 "only const generics are allowed: `{:?}`",
                 v.clone().into_token_stream()
             ),
         };
-        if let Some((_, tokens)) = invoc.args.iter().find(|a| c.ident == a.0) {
-            const_vals.push(quote! { #tokens });
+        if let Some(&(_, ref tokens)) = invoc.args.iter().find(|a| ident == &a.0) {
+            if is_ty {
+                if let syn::Expr::Path(syn::ExprPath { qself, path, .. }) = tokens {
+                    const_vals.push(syn::Ident::new("_", tokens.span()).to_token_stream());
+
+                    let generic_ty_value = syn::TypePath {
+                        qself: qself.clone(),
+                        path: path.clone(),
+                    };
+
+                    inputs.iter_mut().for_each(|arg| {
+                        update_type_path(arg.ty.as_mut(), |type_path: &mut syn::TypePath| {
+                            if let Some(syn::PathSegment {
+                                ident: last_ident, ..
+                            }) = type_path.path.segments.last_mut()
+                            {
+                                if last_ident == ident {
+                                    *type_path = generic_ty_value.to_owned()
+                                }
+                            }
+                        })
+                    });
+                } else {
+                    panic!(
+                        "invalid generic type value {:?} given",
+                        tokens.to_token_stream()
+                    )
+                }
+            } else {
+                const_vals.push(quote! { #tokens });
+            }
         } else {
             panic!("const generics must have a value for tests");
         }
@@ -258,3 +289,21 @@ where
         }
     }
 }
+
+fn update_type_path<F>(ty: &mut syn::Type, fn_ptr: F)
+where
+    F: Fn(&mut syn::TypePath),
+{
+    use syn::Type::*;
+    match ty {
+        Array(syn::TypeArray { elem, .. })
+        | Group(syn::TypeGroup { elem, .. })
+        | Paren(syn::TypeParen { elem, .. })
+        | Ptr(syn::TypePtr { elem, .. })
+        | Reference(syn::TypeReference { elem, .. })
+        | Slice(syn::TypeSlice { elem, .. }) => update_type_path(elem.as_mut(), fn_ptr),
+        Path(path @ syn::TypePath { .. }) => fn_ptr(path),
+        Tuple(..) => panic!("tuples and generic types together are not yet supported"),
+        _ => {}
+    }
+}
diff --git a/crates/core_arch/src/aarch64/mod.rs b/crates/core_arch/src/aarch64/mod.rs
index fefd2f4780..b5a09456de 100644
--- a/crates/core_arch/src/aarch64/mod.rs
+++ b/crates/core_arch/src/aarch64/mod.rs
@@ -13,6 +13,9 @@ mod neon;
 #[stable(feature = "neon_intrinsics", since = "1.59.0")]
 pub use self::neon::*;
 
+#[unstable(feature = "stdarch_aarch64_sve", issue = "99999999")]
+pub mod sve;
+
 mod tme;
 #[unstable(feature = "stdarch_aarch64_tme", issue = "117216")]
 pub use self::tme::*;
diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
new file mode 100644
index 0000000000..1a221f8d62
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
@@ -0,0 +1,9345 @@
+// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/sve` and run the following command to re-generate this
+// file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+#![allow(unused)]
+use super::*;
+use std::boxed::Box;
+use std::convert::{TryFrom, TryInto};
+use std::sync::LazyLock;
+use std::vec::Vec;
+use stdarch_test::simd_test;
+static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as f32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f32 data incorrectly initialised")
+});
+static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as f64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f64 data incorrectly initialised")
+});
+static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| {
+    (0..256 * 5)
+        .map(|i| ((i + 128) % 256 - 128) as i8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i8 data incorrectly initialised")
+});
+static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| {
+    (0..128 * 5)
+        .map(|i| i as i16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i16 data incorrectly initialised")
+});
+static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as i32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i32 data incorrectly initialised")
+});
+static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as i64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i64 data incorrectly initialised")
+});
+static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| {
+    (0..256 * 5)
+        .map(|i| i as u8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u8 data incorrectly initialised")
+});
+static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| {
+    (0..128 * 5)
+        .map(|i| i as u16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u16 data incorrectly initialised")
+});
+static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as u32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u32 data incorrectly initialised")
+});
+static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as u64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u64 data incorrectly initialised")
+});
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_f32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_f64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_s8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_s16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_s32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_s64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_u8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_u16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_u32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_u64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_f32_with_svst1_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    svst1_f32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_f32(svptrue_b32(), storage.as_ptr() as *const f32);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_f64_with_svst1_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    svst1_f64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_f64(svptrue_b64(), storage.as_ptr() as *const f64);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_s8_with_svst1_s8() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_s16_with_svst1_s16() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_s32_with_svst1_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_s64_with_svst1_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_u8_with_svst1_u8() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_u16_with_svst1_u16() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_u32_with_svst1_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_u64_with_svst1_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_s32(0, 1);
+    svst1_scatter_s32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_s64(0, 1);
+    svst1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_u32(0, 1);
+    svst1_scatter_u32index_f32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32index_f32(svptrue_b32(), storage.as_ptr() as *const f32, indices);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_u64(0, 1);
+    svst1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_s32(0, 4u32.try_into().unwrap());
+    svst1_scatter_s32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 4u32.try_into().unwrap());
+    svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 4u32.try_into().unwrap());
+    svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svst1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svst1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_f64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_f64(svptrue_b64(), bases);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_s64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_s64(svptrue_b64(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_u64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_index_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_index_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_index_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_index_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_index_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_index_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_offset_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_offset_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_offset_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_offset_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svst1_scatter_u32base_offset_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u32base_offset_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svst1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_f32_with_svst1_vnum_f32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+    svst1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+    svst1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s8(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u8(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_f32() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_f32 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_f32(svptrue_b32(), F32_DATA.as_ptr());
+    assert_vector_matches_f32(
+        loaded,
+        svtrn1q_f32(
+            svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32),
+            svdupq_n_f32(4usize as f32, 5usize as f32, 6usize as f32, 7usize as f32),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_f64() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_f64 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_f64(svptrue_b64(), F64_DATA.as_ptr());
+    assert_vector_matches_f64(
+        loaded,
+        svtrn1q_f64(
+            svdupq_n_f64(0usize as f64, 1usize as f64),
+            svdupq_n_f64(2usize as f64, 3usize as f64),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_s8() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_s8 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_s8(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i8(
+        loaded,
+        svtrn1q_s8(
+            svdupq_n_s8(
+                0usize as i8,
+                1usize as i8,
+                2usize as i8,
+                3usize as i8,
+                4usize as i8,
+                5usize as i8,
+                6usize as i8,
+                7usize as i8,
+                8usize as i8,
+                9usize as i8,
+                10usize as i8,
+                11usize as i8,
+                12usize as i8,
+                13usize as i8,
+                14usize as i8,
+                15usize as i8,
+            ),
+            svdupq_n_s8(
+                16usize as i8,
+                17usize as i8,
+                18usize as i8,
+                19usize as i8,
+                20usize as i8,
+                21usize as i8,
+                22usize as i8,
+                23usize as i8,
+                24usize as i8,
+                25usize as i8,
+                26usize as i8,
+                27usize as i8,
+                28usize as i8,
+                29usize as i8,
+                30usize as i8,
+                31usize as i8,
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_s16() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_s16 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_s16(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svtrn1q_s16(
+            svdupq_n_s16(
+                0usize as i16,
+                1usize as i16,
+                2usize as i16,
+                3usize as i16,
+                4usize as i16,
+                5usize as i16,
+                6usize as i16,
+                7usize as i16,
+            ),
+            svdupq_n_s16(
+                8usize as i16,
+                9usize as i16,
+                10usize as i16,
+                11usize as i16,
+                12usize as i16,
+                13usize as i16,
+                14usize as i16,
+                15usize as i16,
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_s32() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_s32 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_s32(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svtrn1q_s32(
+            svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32),
+            svdupq_n_s32(4usize as i32, 5usize as i32, 6usize as i32, 7usize as i32),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_s64() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_s64 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_s64(svptrue_b64(), I64_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svtrn1q_s64(
+            svdupq_n_s64(0usize as i64, 1usize as i64),
+            svdupq_n_s64(2usize as i64, 3usize as i64),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_u8() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_u8 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_u8(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u8(
+        loaded,
+        svtrn1q_u8(
+            svdupq_n_u8(
+                0usize as u8,
+                1usize as u8,
+                2usize as u8,
+                3usize as u8,
+                4usize as u8,
+                5usize as u8,
+                6usize as u8,
+                7usize as u8,
+                8usize as u8,
+                9usize as u8,
+                10usize as u8,
+                11usize as u8,
+                12usize as u8,
+                13usize as u8,
+                14usize as u8,
+                15usize as u8,
+            ),
+            svdupq_n_u8(
+                16usize as u8,
+                17usize as u8,
+                18usize as u8,
+                19usize as u8,
+                20usize as u8,
+                21usize as u8,
+                22usize as u8,
+                23usize as u8,
+                24usize as u8,
+                25usize as u8,
+                26usize as u8,
+                27usize as u8,
+                28usize as u8,
+                29usize as u8,
+                30usize as u8,
+                31usize as u8,
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_u16() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_u16 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_u16(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svtrn1q_u16(
+            svdupq_n_u16(
+                0usize as u16,
+                1usize as u16,
+                2usize as u16,
+                3usize as u16,
+                4usize as u16,
+                5usize as u16,
+                6usize as u16,
+                7usize as u16,
+            ),
+            svdupq_n_u16(
+                8usize as u16,
+                9usize as u16,
+                10usize as u16,
+                11usize as u16,
+                12usize as u16,
+                13usize as u16,
+                14usize as u16,
+                15usize as u16,
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_u32() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_u32 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_u32(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svtrn1q_u32(
+            svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32),
+            svdupq_n_u32(4usize as u32, 5usize as u32, 6usize as u32, 7usize as u32),
+        ),
+    );
+}
+#[simd_test(enable = "sve,f64mm")]
+unsafe fn test_svld1ro_u64() {
+    if svcntb() < 32 {
+        println!("Skipping test_svld1ro_u64 due to SVE vector length");
+        return;
+    }
+    svsetffr();
+    let loaded = svld1ro_u64(svptrue_b64(), U64_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svtrn1q_u64(
+            svdupq_n_u64(0usize as u64, 1usize as u64),
+            svdupq_n_u64(2usize as u64, 3usize as u64),
+        ),
+    );
+}
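+// svld1rq (load-and-replicate quadword) reads 128 bits from memory and repeats
+// them across the whole vector, so the expected value below is a single
+// svdupq_n_* quadword pattern rather than a trn1q of two quadwords.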
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_f32() {
+    svsetffr();
+    let loaded = svld1rq_f32(svptrue_b32(), F32_DATA.as_ptr());
+    assert_vector_matches_f32(
+        loaded,
+        svdupq_n_f32(0usize as f32, 1usize as f32, 2usize as f32, 3usize as f32),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_f64() {
+    svsetffr();
+    let loaded = svld1rq_f64(svptrue_b64(), F64_DATA.as_ptr());
+    assert_vector_matches_f64(loaded, svdupq_n_f64(0usize as f64, 1usize as f64));
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_s8() {
+    svsetffr();
+    let loaded = svld1rq_s8(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i8(
+        loaded,
+        svdupq_n_s8(
+            0usize as i8,
+            1usize as i8,
+            2usize as i8,
+            3usize as i8,
+            4usize as i8,
+            5usize as i8,
+            6usize as i8,
+            7usize as i8,
+            8usize as i8,
+            9usize as i8,
+            10usize as i8,
+            11usize as i8,
+            12usize as i8,
+            13usize as i8,
+            14usize as i8,
+            15usize as i8,
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_s16() {
+    svsetffr();
+    let loaded = svld1rq_s16(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svdupq_n_s16(
+            0usize as i16,
+            1usize as i16,
+            2usize as i16,
+            3usize as i16,
+            4usize as i16,
+            5usize as i16,
+            6usize as i16,
+            7usize as i16,
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_s32() {
+    svsetffr();
+    let loaded = svld1rq_s32(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svdupq_n_s32(0usize as i32, 1usize as i32, 2usize as i32, 3usize as i32),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_s64() {
+    svsetffr();
+    let loaded = svld1rq_s64(svptrue_b64(), I64_DATA.as_ptr());
+    assert_vector_matches_i64(loaded, svdupq_n_s64(0usize as i64, 1usize as i64));
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_u8() {
+    svsetffr();
+    let loaded = svld1rq_u8(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u8(
+        loaded,
+        svdupq_n_u8(
+            0usize as u8,
+            1usize as u8,
+            2usize as u8,
+            3usize as u8,
+            4usize as u8,
+            5usize as u8,
+            6usize as u8,
+            7usize as u8,
+            8usize as u8,
+            9usize as u8,
+            10usize as u8,
+            11usize as u8,
+            12usize as u8,
+            13usize as u8,
+            14usize as u8,
+            15usize as u8,
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_u16() {
+    svsetffr();
+    let loaded = svld1rq_u16(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svdupq_n_u16(
+            0usize as u16,
+            1usize as u16,
+            2usize as u16,
+            3usize as u16,
+            4usize as u16,
+            5usize as u16,
+            6usize as u16,
+            7usize as u16,
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_u32() {
+    svsetffr();
+    let loaded = svld1rq_u32(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svdupq_n_u32(0usize as u32, 1usize as u32, 2usize as u32, 3usize as u32),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1rq_u64() {
+    svsetffr();
+    let loaded = svld1rq_u64(svptrue_b64(), U64_DATA.as_ptr());
+    assert_vector_matches_u64(loaded, svdupq_n_u64(0usize as u64, 1usize as u64));
+}
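+// The gather/scatter round-trip tests below scatter an svindex_* pattern into
+// `storage` using per-element byte offsets, sanity-check the stored bytes, and
+// then gather the same elements back and compare against the original pattern.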
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
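+// In the *_u32base_offset_* tests the per-element `bases` vector carries the
+// byte offsets, while the scalar offset argument carries the address of
+// `storage` plus one element, so the pattern starts at element index 1.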
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
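+// The *_u64base_offset_* tests build full 64-bit addresses by adding byte
+// offsets to the storage pointer, then store and reload with a constant
+// offset of one element.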
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
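+// The plain *_u64base_* variants take fully-formed 64-bit addresses in the
+// `bases` vector and no separate offset argument.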
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_s64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64base_s64(svptrue_b8(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_s64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_s64(svptrue_b16(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_s64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_s64(svptrue_b32(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_u64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_gather_u64base_u64(svptrue_b8(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_u64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_u64(svptrue_b16(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_u64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_u64(svptrue_b32(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
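+// Contiguous truncating stores (svst1b/svst1h/svst1w) paired with
+// sign-extending loads (svld1sb/svld1sh/svld1sw): each element is narrowed on
+// store and widened back to its original element type on load.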
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_s16_with_svst1b_s16() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_s16(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_s32_with_svst1b_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_s32(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_s32_with_svst1h_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_s32(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_s64_with_svst1b_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_s64(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_s64_with_svst1h_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_s64(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_s64_with_svst1w_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_s64(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_u16_with_svst1b_u16() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_u16(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_u32_with_svst1b_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_u32(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_u32_with_svst1h_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1sh_u32(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_u64_with_svst1b_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_u64(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_u64_with_svst1h_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1sh_u64(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_u64_with_svst1w_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1sw_u64(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
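+// The *_vnum_* tests store and load at a vector offset of one register
+// (vnum = 1), so the expected pattern starts at `len`, the element count of
+// one vector.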
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_s16(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_s32(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_s64(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_u16(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_u32(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1sh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1sb_vnum_u64(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1sh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1sw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
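+// Index-based gather/scatter variants scale the index vector by the element
+// size, so svindex_*(0, 1) addresses consecutive elements rather than bytes.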
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() {
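+    // The _index_ forms scale the scalar index by the element size: here the index is
+    // the storage address in 16-bit elements plus one, so each lane lands at storage[i + 1].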
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() {
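+    // For the _u64base_ forms the base vector holds absolute per-lane addresses (storage
+    // pointer plus byte offsets); the element index of 1 shifts every lane by one element.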
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() {
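+    // The _offset_ forms take byte offsets: bases step by 2 bytes per lane and the scalar
+    // offset is the storage address plus 2, so each lane targets storage[i + 1].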
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svst1b_scatter_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_s64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64base_s64(svptrue_b8(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_s64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_s64(svptrue_b16(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_s64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_s64(svptrue_b32(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svst1b_scatter_u64base_u64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_gather_u64base_u64(svptrue_b8(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_u64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_u64(svptrue_b16(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_u64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_u64(svptrue_b32(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_s16_with_svst1b_s16() {
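+    // svst1b_s16 truncates each 16-bit lane to a byte in memory; svld1ub_s16 zero-extends
+    // it back, so the small non-negative index values round-trip unchanged.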
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_s16(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_s32_with_svst1b_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_s32(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_s32_with_svst1h_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_s32(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_s64_with_svst1b_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_s64(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_s64_with_svst1h_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_s64(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_s64_with_svst1w_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_s64(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_u16_with_svst1b_u16() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_u16(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_u32_with_svst1b_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_u32(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_u32_with_svst1h_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1uh_u32(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_u64_with_svst1b_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_u64(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_u64_with_svst1h_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1uh_u64(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_u64_with_svst1w_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1uw_u64(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() {
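+    // vnum = 1 advances the address by one vector's worth of stored elements, so the
+    // written (and expected) values start at `len` = svcnth().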
+    let len = svcnth() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_s16(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_s32(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_s64(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_u16(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_u32_with_svst1b_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_u32(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1uh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld1ub_vnum_u64(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_vnum_u64_with_svst1h_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld1uh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld1uw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s32(0, 1);
+    svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u32(0, 1);
+    svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svld1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svld1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
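+// The "u32base"/"u64base" forms below pass the addresses through a vector of
+// bases instead: the 32-bit-base tests keep per-element byte offsets in the
+// base vector and fold the buffer pointer into the scalar index argument
+// (scaled by the element size), while the 64-bit-base tests put absolute byte
+// addresses into the base vector and use a constant scalar index of 1.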
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svst1h_scatter_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svst1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svst1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
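+// Structured two-vector tests: svst2_* interleaves a pair of index vectors
+// into memory, the raw contents are checked element by element, and svld2_*
+// de-interleaves them back; svget2_* extracts each half of the tuple for the
+// comparison against the original vectors.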
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_f32_with_svst2_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate2_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+    svst2_f32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld2_f32(svptrue_b32(), storage.as_ptr() as *const f32);
+    assert_vector_matches_f32(
+        svget2_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget2_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_f64_with_svst2_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate2_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+    svst2_f64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld2_f64(svptrue_b64(), storage.as_ptr() as *const f64);
+    assert_vector_matches_f64(
+        svget2_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget2_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_s8_with_svst2_s8() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate2_s8(
+        svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_s8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld2_s8(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i8(
+        svget2_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget2_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_s16_with_svst2_s16() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate2_s16(
+        svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_s16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld2_s16(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i16(
+        svget2_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget2_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_s32_with_svst2_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate2_s32(
+        svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld2_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i32(
+        svget2_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget2_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_s64_with_svst2_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate2_s64(
+        svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_s64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld2_s64(svptrue_b64(), storage.as_ptr() as *const i64);
+    assert_vector_matches_i64(
+        svget2_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget2_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_u8_with_svst2_u8() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate2_u8(
+        svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_u8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld2_u8(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u8(
+        svget2_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget2_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_u16_with_svst2_u16() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate2_u16(
+        svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_u16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld2_u16(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u16(
+        svget2_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget2_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_u32_with_svst2_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate2_u32(
+        svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_u32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld2_u32(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u32(
+        svget2_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget2_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_u64_with_svst2_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate2_u64(
+        svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    svst2_u64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld2_u64(svptrue_b64(), storage.as_ptr() as *const u64);
+    assert_vector_matches_u64(
+        svget2_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget2_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+    );
+}
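+// The _vnum variants repeat the svld2/svst2 pattern with the buffer offset by
+// one vector's worth of elements (vnum = 1), so the expected values start at
+// the current vector length (svcntw(), svcnth(), ...) rather than at zero.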
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_f32_with_svst2_vnum_f32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate2_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst2_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);
+    assert_vector_matches_f32(
+        svget2_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget2_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_f64_with_svst2_vnum_f64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate2_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst2_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);
+    assert_vector_matches_f64(
+        svget2_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget2_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                2usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate2_s8(
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i8(
+        svget2_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget2_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate2_s16(
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i16(
+        svget2_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget2_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate2_s32(
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i32(
+        svget2_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget2_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate2_s64(
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
+    assert_vector_matches_i64(
+        svget2_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget2_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate2_u8(
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u8(
+        svget2_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget2_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate2_u16(
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u16(
+        svget2_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget2_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate2_u32(
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u32(
+        svget2_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget2_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate2_u64(
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    svst2_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld2_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
+    assert_vector_matches_u64(
+        svget2_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget2_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            2usize.try_into().unwrap(),
+        ),
+    );
+}
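+// Three-vector (svld3/svst3) tests follow the same shape with a stride of 3
+// and three interleaved index vectors, extracted with svget3_* after the load.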
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_f32_with_svst3_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate3_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    svst3_f32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld3_f32(svptrue_b32(), storage.as_ptr() as *const f32);
+    assert_vector_matches_f32(
+        svget3_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget3_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget3_f32::<{ 2usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_f64_with_svst3_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate3_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    svst3_f64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld3_f64(svptrue_b64(), storage.as_ptr() as *const f64);
+    assert_vector_matches_f64(
+        svget3_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget3_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget3_f64::<{ 2usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_s8_with_svst3_s8() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate3_s8(
+        svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_s8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld3_s8(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i8(
+        svget3_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget3_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget3_s8::<{ 2usize as i32 }>(loaded),
+        svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_s16_with_svst3_s16() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate3_s16(
+        svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_s16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld3_s16(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i16(
+        svget3_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget3_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget3_s16::<{ 2usize as i32 }>(loaded),
+        svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_s32_with_svst3_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate3_s32(
+        svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld3_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i32(
+        svget3_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget3_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget3_s32::<{ 2usize as i32 }>(loaded),
+        svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_s64_with_svst3_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate3_s64(
+        svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_s64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld3_s64(svptrue_b64(), storage.as_ptr() as *const i64);
+    assert_vector_matches_i64(
+        svget3_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget3_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget3_s64::<{ 2usize as i32 }>(loaded),
+        svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_u8_with_svst3_u8() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate3_u8(
+        svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_u8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld3_u8(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u8(
+        svget3_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget3_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget3_u8::<{ 2usize as i32 }>(loaded),
+        svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_u16_with_svst3_u16() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate3_u16(
+        svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_u16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld3_u16(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u16(
+        svget3_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget3_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget3_u16::<{ 2usize as i32 }>(loaded),
+        svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_u32_with_svst3_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate3_u32(
+        svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_u32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld3_u32(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u32(
+        svget3_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget3_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget3_u32::<{ 2usize as i32 }>(loaded),
+        svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_u64_with_svst3_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate3_u64(
+        svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    svst3_u64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld3_u64(svptrue_b64(), storage.as_ptr() as *const u64);
+    assert_vector_matches_u64(
+        svget3_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget3_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget3_u64::<{ 2usize as i32 }>(loaded),
+        svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+    );
+}
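+// And the corresponding _vnum forms of svld3/svst3, again offset by one
+// vector length.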
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_f32_with_svst3_vnum_f32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate3_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 2usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst3_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);
+    assert_vector_matches_f32(
+        svget3_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget3_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget3_f32::<{ 2usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 2usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_f64_with_svst3_vnum_f64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate3_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 2usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst3_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);
+    assert_vector_matches_f64(
+        svget3_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget3_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget3_f64::<{ 2usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 2usize).try_into().unwrap(),
+                3usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate3_s8(
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i8(
+        svget3_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget3_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget3_s8::<{ 2usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate3_s16(
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i16(
+        svget3_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget3_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget3_s16::<{ 2usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate3_s32(
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i32(
+        svget3_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget3_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget3_s32::<{ 2usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate3_s64(
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
+    assert_vector_matches_i64(
+        svget3_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget3_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget3_s64::<{ 2usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_u8_with_svst3_vnum_u8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate3_u8(
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u8(
+        svget3_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget3_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget3_u8::<{ 2usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate3_u16(
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u16(
+        svget3_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget3_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget3_u16::<{ 2usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate3_u32(
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u32(
+        svget3_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget3_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget3_u32::<{ 2usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate3_u64(
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    svst3_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld3_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
+    assert_vector_matches_u64(
+        svget3_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget3_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget3_u64::<{ 2usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 2usize).try_into().unwrap(),
+            3usize.try_into().unwrap(),
+        ),
+    );
+}
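+// svld4/svst4 round trip: build four index vectors whose lanes interleave into
+// consecutive values, store them with svst4, check that every written element
+// equals its index, then reload with svld4 and compare each extracted vector
+// against the original.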
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_f32_with_svst4_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate4_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    svst4_f32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld4_f32(svptrue_b32(), storage.as_ptr() as *const f32);
+    assert_vector_matches_f32(
+        svget4_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 2usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 3usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_f64_with_svst4_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate4_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    svst4_f64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld4_f64(svptrue_b64(), storage.as_ptr() as *const f64);
+    assert_vector_matches_f64(
+        svget4_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 2usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 3usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_s8_with_svst4_s8() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate4_s8(
+        svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_s8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld4_s8(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i8(
+        svget4_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 2usize as i32 }>(loaded),
+        svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 3usize as i32 }>(loaded),
+        svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_s16_with_svst4_s16() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate4_s16(
+        svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_s16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld4_s16(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i16(
+        svget4_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 2usize as i32 }>(loaded),
+        svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 3usize as i32 }>(loaded),
+        svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_s32_with_svst4_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate4_s32(
+        svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld4_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i32(
+        svget4_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 2usize as i32 }>(loaded),
+        svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 3usize as i32 }>(loaded),
+        svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_s64_with_svst4_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate4_s64(
+        svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_s64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld4_s64(svptrue_b64(), storage.as_ptr() as *const i64);
+    assert_vector_matches_i64(
+        svget4_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 2usize as i32 }>(loaded),
+        svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 3usize as i32 }>(loaded),
+        svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_u8_with_svst4_u8() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate4_u8(
+        svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_u8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld4_u8(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u8(
+        svget4_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 2usize as i32 }>(loaded),
+        svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 3usize as i32 }>(loaded),
+        svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_u16_with_svst4_u16() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate4_u16(
+        svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_u16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld4_u16(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u16(
+        svget4_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 2usize as i32 }>(loaded),
+        svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 3usize as i32 }>(loaded),
+        svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_u32_with_svst4_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate4_u32(
+        svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_u32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld4_u32(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u32(
+        svget4_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 2usize as i32 }>(loaded),
+        svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 3usize as i32 }>(loaded),
+        svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_u64_with_svst4_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate4_u64(
+        svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    svst4_u64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld4_u64(svptrue_b64(), storage.as_ptr() as *const u64);
+    assert_vector_matches_u64(
+        svget4_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 2usize as i32 }>(loaded),
+        svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 3usize as i32 }>(loaded),
+        svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+    );
+}
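+// Same round-trip pattern as above, but exercising the svld4_vnum/svst4_vnum
+// variants, which take an additional vnum offset argument; these tests use vnum = 1.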
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_f32_with_svst4_vnum_f32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as f32; 320usize];
+    let data = svcreate4_f32(
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 2usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 3usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst4_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);
+    assert_vector_matches_f32(
+        svget4_f32::<{ 0usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 1usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 1usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 2usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 2usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f32(
+        svget4_f32::<{ 3usize as i32 }>(loaded),
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 3usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_f64_with_svst4_vnum_f64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as f64; 160usize];
+    let data = svcreate4_f64(
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 2usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 3usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    svst4_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);
+    assert_vector_matches_f64(
+        svget4_f64::<{ 0usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 1usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 1usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 2usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 2usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+    assert_vector_matches_f64(
+        svget4_f64::<{ 3usize as i32 }>(loaded),
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 3usize).try_into().unwrap(),
+                4usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_s8_with_svst4_vnum_s8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svcreate4_s8(
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s8(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i8(
+        svget4_s8::<{ 0usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 1usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 2usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i8(
+        svget4_s8::<{ 3usize as i32 }>(loaded),
+        svindex_s8(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svcreate4_s16(
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s16(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i16(
+        svget4_s16::<{ 0usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 1usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 2usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i16(
+        svget4_s16::<{ 3usize as i32 }>(loaded),
+        svindex_s16(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svcreate4_s32(
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s32(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i32(
+        svget4_s32::<{ 0usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 1usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 2usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i32(
+        svget4_s32::<{ 3usize as i32 }>(loaded),
+        svindex_s32(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i64; 160usize];
+    let data = svcreate4_s64(
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_s64(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
+    assert_vector_matches_i64(
+        svget4_s64::<{ 0usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 1usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 2usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_i64(
+        svget4_s64::<{ 3usize as i32 }>(loaded),
+        svindex_s64(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svcreate4_u8(
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u8(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u8(
+        svget4_u8::<{ 0usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 1usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 2usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u8(
+        svget4_u8::<{ 3usize as i32 }>(loaded),
+        svindex_u8(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svcreate4_u16(
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u16(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u16(
+        svget4_u16::<{ 0usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 1usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 2usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u16(
+        svget4_u16::<{ 3usize as i32 }>(loaded),
+        svindex_u16(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svcreate4_u32(
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u32(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u32(
+        svget4_u32::<{ 0usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 1usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 2usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u32(
+        svget4_u32::<{ 3usize as i32 }>(loaded),
+        svindex_u32(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u64; 160usize];
+    let data = svcreate4_u64(
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+        svindex_u64(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    svst4_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svld4_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
+    assert_vector_matches_u64(
+        svget4_u64::<{ 0usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 1usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 1usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 2usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 2usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+    assert_vector_matches_u64(
+        svget4_u64::<{ 3usize as i32 }>(loaded),
+        svindex_u64(
+            (len + 3usize).try_into().unwrap(),
+            4usize.try_into().unwrap(),
+        ),
+    );
+}
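+// First-faulting loads: svsetffr() sets every bit of the first-fault register,
+// a regular svld1 of the same data precedes the first-faulting load, and the
+// svldff1 result is checked against an index sequence starting at 0 with step 1.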
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_f32() {
+    svsetffr();
+    let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr());
+    let loaded = svldff1_f32(svptrue_b32(), F32_DATA.as_ptr());
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_f64() {
+    svsetffr();
+    let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr());
+    let loaded = svldff1_f64(svptrue_b64(), F64_DATA.as_ptr());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_s8() {
+    svsetffr();
+    let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1_s8(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_s16() {
+    svsetffr();
+    let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldff1_s16(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_s32() {
+    svsetffr();
+    let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldff1_s32(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_s64() {
+    svsetffr();
+    let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr());
+    let loaded = svldff1_s64(svptrue_b64(), I64_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_u8() {
+    svsetffr();
+    let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1_u8(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_u16() {
+    svsetffr();
+    let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldff1_u16(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_u32() {
+    svsetffr();
+    let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldff1_u32(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_u64() {
+    svsetffr();
+    let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr());
+    let loaded = svldff1_u64(svptrue_b64(), U64_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
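+// vnum variants of the first-faulting loads: loading at vnum = 1 starts one
+// vector's worth of elements into the data, so the expected sequence starts at `len`.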
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_f32() {
+    svsetffr();
+    let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_f64() {
+    svsetffr();
+    let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_s8() {
+    svsetffr();
+    let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntb() as usize;
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_s16() {
+    svsetffr();
+    let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_s32() {
+    svsetffr();
+    let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_s64() {
+    svsetffr();
+    let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_u8() {
+    svsetffr();
+    let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntb() as usize;
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_u16() {
+    svsetffr();
+    let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_u32() {
+    svsetffr();
+    let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1_vnum_u64() {
+    svsetffr();
+    let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1);
+    let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
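+// Sign-extending first-faulting loads (svldff1sb/sh/sw): narrower signed
+// elements are loaded and sign-extended into wider lanes before being compared
+// against the expected index sequence.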
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_s16() {
+    svsetffr();
+    let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_s32() {
+    svsetffr();
+    let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_s32() {
+    svsetffr();
+    let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_s64() {
+    svsetffr();
+    let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_s64() {
+    svsetffr();
+    let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sw_s64() {
+    svsetffr();
+    let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_u16() {
+    svsetffr();
+    let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_u32() {
+    svsetffr();
+    let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_u32() {
+    svsetffr();
+    let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_u64() {
+    svsetffr();
+    let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_u64() {
+    svsetffr();
+    let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sw_u64() {
+    svsetffr();
+    let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_s16() {
+    svsetffr();
+    let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_s32() {
+    svsetffr();
+    let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_vnum_s32() {
+    svsetffr();
+    let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_s64() {
+    svsetffr();
+    let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_vnum_s64() {
+    svsetffr();
+    let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sw_vnum_s64() {
+    svsetffr();
+    let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_u16() {
+    svsetffr();
+    let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_u32() {
+    svsetffr();
+    let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_vnum_u32() {
+    svsetffr();
+    let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sb_vnum_u64() {
+    svsetffr();
+    let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sh_vnum_u64() {
+    svsetffr();
+    let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1sw_vnum_u64() {
+    svsetffr();
+    let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
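+// Zero-extending first-faulting loads (svldff1ub/svldff1uh/svldff1uw): each
+// narrower unsigned element is widened to the destination element type, so the
+// expected result is the same 0, 1, 2, ... index ramp as for the full-width loads.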
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_s16() {
+    svsetffr();
+    let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_s32() {
+    svsetffr();
+    let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_s32() {
+    svsetffr();
+    let _ = svld1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldff1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_s64() {
+    svsetffr();
+    let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_s64() {
+    svsetffr();
+    let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldff1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uw_s64() {
+    svsetffr();
+    let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldff1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_u16() {
+    svsetffr();
+    let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_u32() {
+    svsetffr();
+    let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_u32() {
+    svsetffr();
+    let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldff1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_u64() {
+    svsetffr();
+    let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldff1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_u64() {
+    svsetffr();
+    let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldff1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uw_u64() {
+    svsetffr();
+    let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldff1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_s16() {
+    svsetffr();
+    let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_s32() {
+    svsetffr();
+    let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_vnum_s32() {
+    svsetffr();
+    let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldff1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_s64() {
+    svsetffr();
+    let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_vnum_s64() {
+    svsetffr();
+    let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldff1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uw_vnum_s64() {
+    svsetffr();
+    let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldff1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_u16() {
+    svsetffr();
+    let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_u32() {
+    svsetffr();
+    let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_vnum_u32() {
+    svsetffr();
+    let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldff1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1ub_vnum_u64() {
+    svsetffr();
+    let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldff1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uh_vnum_u64() {
+    svsetffr();
+    let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldff1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldff1uw_vnum_u64() {
+    svsetffr();
+    let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldff1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
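+// Non-faulting loads (svldnf1*): unlike first-faulting loads these never raise a
+// fault; elements that cannot be read instead clear bits in the first-fault
+// register (FFR). svsetffr() resets the FFR to all-true beforehand, so every lane
+// is expected to load successfully in these tests.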
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_f32() {
+    svsetffr();
+    let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr());
+    let loaded = svldnf1_f32(svptrue_b32(), F32_DATA.as_ptr());
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_f64() {
+    svsetffr();
+    let _ = svld1_f64(svptrue_b64(), F64_DATA.as_ptr());
+    let loaded = svldnf1_f64(svptrue_b64(), F64_DATA.as_ptr());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_s8() {
+    svsetffr();
+    let _ = svld1_s8(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1_s8(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_s16() {
+    svsetffr();
+    let _ = svld1_s16(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldnf1_s16(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_s32() {
+    svsetffr();
+    let _ = svld1_s32(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldnf1_s32(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_s64() {
+    svsetffr();
+    let _ = svld1_s64(svptrue_b64(), I64_DATA.as_ptr());
+    let loaded = svldnf1_s64(svptrue_b64(), I64_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_u8() {
+    svsetffr();
+    let _ = svld1_u8(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1_u8(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_u16() {
+    svsetffr();
+    let _ = svld1_u16(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldnf1_u16(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_u32() {
+    svsetffr();
+    let _ = svld1_u32(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldnf1_u32(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_u64() {
+    svsetffr();
+    let _ = svld1_u64(svptrue_b64(), U64_DATA.as_ptr());
+    let loaded = svldnf1_u64(svptrue_b64(), U64_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_f32() {
+    svsetffr();
+    let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_f64() {
+    svsetffr();
+    let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_s8() {
+    svsetffr();
+    let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntb() as usize;
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_s16() {
+    svsetffr();
+    let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_s32() {
+    svsetffr();
+    let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_s64() {
+    svsetffr();
+    let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_u8() {
+    svsetffr();
+    let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntb() as usize;
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_u16() {
+    svsetffr();
+    let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_u32() {
+    svsetffr();
+    let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1_vnum_u64() {
+    svsetffr();
+    let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1);
+    let loaded = svldnf1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
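+// Sign- and zero-extending non-faulting loads (svldnf1s*/svldnf1u*) mirror the
+// extending first-faulting loads above, with the same expected index ramps.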
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_s16() {
+    svsetffr();
+    let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_s32() {
+    svsetffr();
+    let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_s32() {
+    svsetffr();
+    let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldnf1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_s64() {
+    svsetffr();
+    let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_s64() {
+    svsetffr();
+    let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldnf1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sw_s64() {
+    svsetffr();
+    let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldnf1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_u16() {
+    svsetffr();
+    let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_u32() {
+    svsetffr();
+    let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_u32() {
+    svsetffr();
+    let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldnf1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_u64() {
+    svsetffr();
+    let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
+    let loaded = svldnf1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_u64() {
+    svsetffr();
+    let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
+    let loaded = svldnf1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sw_u64() {
+    svsetffr();
+    let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
+    let loaded = svldnf1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_s16() {
+    svsetffr();
+    let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_s32() {
+    svsetffr();
+    let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_vnum_s32() {
+    svsetffr();
+    let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldnf1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_s64() {
+    svsetffr();
+    let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_vnum_s64() {
+    svsetffr();
+    let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldnf1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sw_vnum_s64() {
+    svsetffr();
+    let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldnf1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_u16() {
+    svsetffr();
+    let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_u32() {
+    svsetffr();
+    let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_vnum_u32() {
+    svsetffr();
+    let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldnf1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sb_vnum_u64() {
+    svsetffr();
+    let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let loaded = svldnf1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sh_vnum_u64() {
+    svsetffr();
+    let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let loaded = svldnf1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1sw_vnum_u64() {
+    svsetffr();
+    let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let loaded = svldnf1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_s16() {
+    svsetffr();
+    let _ = svld1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_s32() {
+    svsetffr();
+    let _ = svld1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_s32() {
+    svsetffr();
+    let _ = svld1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldnf1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_s64() {
+    svsetffr();
+    let _ = svld1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_s64() {
+    svsetffr();
+    let _ = svld1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldnf1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uw_s64() {
+    svsetffr();
+    let _ = svld1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldnf1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_u16() {
+    svsetffr();
+    let _ = svld1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_u32() {
+    svsetffr();
+    let _ = svld1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_u32() {
+    svsetffr();
+    let _ = svld1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldnf1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_u64() {
+    svsetffr();
+    let _ = svld1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
+    let loaded = svldnf1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_u64() {
+    svsetffr();
+    let _ = svld1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
+    let loaded = svldnf1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uw_u64() {
+    svsetffr();
+    let _ = svld1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
+    let loaded = svldnf1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_s16() {
+    svsetffr();
+    let _ = svld1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_s16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_s32() {
+    svsetffr();
+    let _ = svld1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_s32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_vnum_s32() {
+    svsetffr();
+    let _ = svld1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldnf1uh_vnum_s32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_s64() {
+    svsetffr();
+    let _ = svld1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_s64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_vnum_s64() {
+    svsetffr();
+    let _ = svld1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldnf1uh_vnum_s64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uw_vnum_s64() {
+    svsetffr();
+    let _ = svld1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldnf1uw_vnum_s64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_u16() {
+    svsetffr();
+    let _ = svld1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_u16(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcnth() as usize;
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_u32() {
+    svsetffr();
+    let _ = svld1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_u32(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_vnum_u32() {
+    svsetffr();
+    let _ = svld1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldnf1uh_vnum_u32(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntw() as usize;
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1ub_vnum_u64() {
+    svsetffr();
+    let _ = svld1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let loaded = svldnf1ub_vnum_u64(svptrue_b8(), U8_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uh_vnum_u64() {
+    svsetffr();
+    let _ = svld1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let loaded = svldnf1uh_vnum_u64(svptrue_b16(), U16_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnf1uw_vnum_u64() {
+    svsetffr();
+    let _ = svld1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let loaded = svldnf1uw_vnum_u64(svptrue_b32(), U32_DATA.as_ptr(), 1);
+    let len = svcntd() as usize;
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
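+// Non-temporal load/store round trips (svldnt1*/svstnt1*): each test stores a
+// known index ramp with the non-temporal store, sanity-checks the buffer, then
+// loads it back with the matching non-temporal load and compares against the
+// same ramp.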
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_f32_with_svstnt1_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    svstnt1_f32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svldnt1_f32(svptrue_b32(), storage.as_ptr() as *const f32);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_f64_with_svstnt1_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    svstnt1_f64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svldnt1_f64(svptrue_b64(), storage.as_ptr() as *const f64);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_s8_with_svstnt1_s8() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_s8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1_s8(svptrue_b8(), storage.as_ptr() as *const i8);
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_s16_with_svstnt1_s16() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_s16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1_s16(svptrue_b16(), storage.as_ptr() as *const i16);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_s32_with_svstnt1_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_s64_with_svstnt1_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_s64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svldnt1_s64(svptrue_b64(), storage.as_ptr() as *const i64);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_u8_with_svstnt1_u8() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_u8(svptrue_b8(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svldnt1_u8(svptrue_b8(), storage.as_ptr() as *const u8);
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_u16_with_svstnt1_u16() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_u16(svptrue_b16(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svldnt1_u16(svptrue_b16(), storage.as_ptr() as *const u16);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_u32_with_svstnt1_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_u32(svptrue_b32(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svldnt1_u32(svptrue_b32(), storage.as_ptr() as *const u32);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_u64_with_svstnt1_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    svstnt1_u64(svptrue_b64(), storage.as_mut_ptr(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svldnt1_u64(svptrue_b64(), storage.as_ptr() as *const u64);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_f32_with_svstnt1_vnum_f32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+    svstnt1_vnum_f32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_f32(svptrue_b32(), storage.as_ptr() as *const f32, 1);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_f64_with_svstnt1_vnum_f64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+    svstnt1_vnum_f64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_f64(svptrue_b64(), storage.as_ptr() as *const f64, 1);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64(
+                (len + 0usize).try_into().unwrap(),
+                1usize.try_into().unwrap(),
+            ),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s8(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
+    assert_vector_matches_i8(
+        loaded,
+        svindex_s8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
+    assert_vector_matches_i16(
+        loaded,
+        svindex_s16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() {
+    let len = svcntb() as usize;
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u8(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
+    assert_vector_matches_u8(
+        loaded,
+        svindex_u8(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() {
+    let len = svcnth() as usize;
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u16(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
+    assert_vector_matches_u16(
+        loaded,
+        svindex_u16(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() {
+    let len = svcntw() as usize;
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() {
+    let len = svcntd() as usize;
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64(
+        (len + 0usize).try_into().unwrap(),
+        1usize.try_into().unwrap(),
+    );
+    svstnt1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svldnt1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64(
+            (len + 0usize).try_into().unwrap(),
+            1usize.try_into().unwrap(),
+        ),
+    );
+}
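+// Prefetch tests (svprf*): prefetches have no architecturally visible result, so
+// these only check that the intrinsics compile and execute without faulting.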
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb() {
+    svsetffr();
+    svprfb::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr());
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh() {
+    svsetffr();
+    svprfh::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr());
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw() {
+    svsetffr();
+    svprfw::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr());
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd() {
+    svsetffr();
+    svprfd::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr());
+}
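+// Gather prefetch variants: the `*offset` forms take byte offsets, while the
+// `*index` forms take indices that are scaled by the prefetch element size
+// (2, 4 or 8 bytes for svprfh/svprfw/svprfd respectively).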
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_s32offset() {
+    let offsets = svindex_s32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    svprfb_gather_s32offset::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        offsets,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_s32index() {
+    let indices = svindex_s32(0, 1);
+    svsetffr();
+    svprfh_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_s32index() {
+    let indices = svindex_s32(0, 1);
+    svsetffr();
+    svprfw_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_s32index() {
+    let indices = svindex_s32(0, 1);
+    svsetffr();
+    svprfd_gather_s32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_s64offset() {
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svsetffr();
+    svprfb_gather_s64offset::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        offsets,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_s64index() {
+    let indices = svindex_s64(0, 1);
+    svsetffr();
+    svprfh_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_s64index() {
+    let indices = svindex_s64(0, 1);
+    svsetffr();
+    svprfw_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_s64index() {
+    let indices = svindex_s64(0, 1);
+    svsetffr();
+    svprfd_gather_s64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_u32offset() {
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    svprfb_gather_u32offset::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        offsets,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_u32index() {
+    let indices = svindex_u32(0, 1);
+    svsetffr();
+    let loaded = svprfh_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_u32index() {
+    let indices = svindex_u32(0, 1);
+    svsetffr();
+    let loaded = svprfw_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_u32index() {
+    let indices = svindex_u32(0, 1);
+    svsetffr();
+    let loaded = svprfd_gather_u32index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b32(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_u64offset() {
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svsetffr();
+    let loaded = svprfb_gather_u64offset::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        offsets,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_u64index() {
+    let indices = svindex_u64(0, 1);
+    svsetffr();
+    let loaded = svprfh_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_u64index() {
+    let indices = svindex_u64(0, 1);
+    svsetffr();
+    let loaded = svprfw_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_u64index() {
+    let indices = svindex_u64(0, 1);
+    svsetffr();
+    let loaded = svprfd_gather_u64index::<{ svprfop::SV_PLDL1KEEP }, i64>(
+        svptrue_b64(),
+        I64_DATA.as_ptr(),
+        indices,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_u64base() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfb_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_u64base() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfh_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_u64base() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfw_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_u64base() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfd_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(svptrue_b64(), bases);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_u32base_offset() {
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    let loaded = svprfb_gather_u32base_offset::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b32(),
+        bases,
+        U32_DATA.as_ptr() as i64 + 4u32 as i64,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_u32base_index() {
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    let loaded = svprfh_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b32(),
+        bases,
+        U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_u32base_index() {
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    let loaded = svprfw_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b32(),
+        bases,
+        U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_u32base_index() {
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svsetffr();
+    let loaded = svprfd_gather_u32base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b32(),
+        bases,
+        U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_gather_u64base_offset() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfb_gather_u64base_offset::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b64(),
+        bases,
+        8u32.try_into().unwrap(),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_gather_u64base_index() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfh_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b64(),
+        bases,
+        1.try_into().unwrap(),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_gather_u64base_index() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfw_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b64(),
+        bases,
+        1.try_into().unwrap(),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_gather_u64base_index() {
+    let bases = svdup_n_u64(U64_DATA.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svsetffr();
+    let loaded = svprfd_gather_u64base_index::<{ svprfop::SV_PLDL1KEEP }>(
+        svptrue_b64(),
+        bases,
+        1.try_into().unwrap(),
+    );
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfb_vnum() {
+    svsetffr();
+    let loaded = svprfb_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b8(), I64_DATA.as_ptr(), 1);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfh_vnum() {
+    svsetffr();
+    let loaded = svprfh_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b16(), I64_DATA.as_ptr(), 1);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfw_vnum() {
+    svsetffr();
+    let loaded = svprfw_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b32(), I64_DATA.as_ptr(), 1);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_svprfd_vnum() {
+    svsetffr();
+    let loaded = svprfd_vnum::<{ svprfop::SV_PLDL1KEEP }, i64>(svptrue_b64(), I64_DATA.as_ptr(), 1);
+}
+#[simd_test(enable = "sve")]
+unsafe fn test_ffr() {
+    svsetffr();
+    let ffr = svrdffr();
+    assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svindex_u8(1, 0));
+    let pred = svdupq_n_b8(
+        true, false, true, false, true, false, true, false, true, false, true, false, true, false,
+        true, false,
+    );
+    svwrffr(pred);
+    let ffr = svrdffr_z(svptrue_b8());
+    assert_vector_matches_u8(svdup_n_u8_z(ffr, 1), svdup_n_u8_z(pred, 1));
+}
diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
new file mode 100644
index 0000000000..3fc821ec54
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
@@ -0,0 +1,2482 @@
+// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/sve` and run the following command to re-generate this
+// file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+#![allow(unused)]
+use super::*;
+use std::boxed::Box;
+use std::convert::{TryFrom, TryInto};
+use std::sync::LazyLock;
+use std::vec::Vec;
+use stdarch_test::simd_test;
+static F32_DATA: LazyLock<[f32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as f32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f32 data incorrectly initialised")
+});
+static F64_DATA: LazyLock<[f64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as f64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("f64 data incorrectly initialised")
+});
+static I8_DATA: LazyLock<[i8; 256 * 5]> = LazyLock::new(|| {
+    (0..256 * 5)
+        .map(|i| ((i + 128) % 256 - 128) as i8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i8 data incorrectly initialised")
+});
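+// Note: unlike the other data arrays, `I8_DATA` shifts the index into range before the
+// cast, so element `i` holds the two's-complement wrap of `i` (i.e. the same value as
+// `i as i8`) while keeping the intermediate arithmetic within 0..=255.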
+static I16_DATA: LazyLock<[i16; 128 * 5]> = LazyLock::new(|| {
+    (0..128 * 5)
+        .map(|i| i as i16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i16 data incorrectly initialised")
+});
+static I32_DATA: LazyLock<[i32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as i32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i32 data incorrectly initialised")
+});
+static I64_DATA: LazyLock<[i64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as i64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("i64 data incorrectly initialised")
+});
+static U8_DATA: LazyLock<[u8; 256 * 5]> = LazyLock::new(|| {
+    (0..256 * 5)
+        .map(|i| i as u8)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u8 data incorrectly initialised")
+});
+static U16_DATA: LazyLock<[u16; 128 * 5]> = LazyLock::new(|| {
+    (0..128 * 5)
+        .map(|i| i as u16)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u16 data incorrectly initialised")
+});
+static U32_DATA: LazyLock<[u32; 64 * 5]> = LazyLock::new(|| {
+    (0..64 * 5)
+        .map(|i| i as u32)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u32 data incorrectly initialised")
+});
+static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| {
+    (0..32 * 5)
+        .map(|i| i as u64)
+        .collect::<Vec<_>>()
+        .try_into()
+        .expect("u64 data incorrectly initialised")
+});
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_f32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_f64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_s8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_s16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_s32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_s64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b8(), defined));
+    let cmp = svcmpne_u8(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b16(), defined));
+    let cmp = svcmpne_u16(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b32(), defined));
+    let cmp = svcmpne_u32(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
+#[target_feature(enable = "sve")]
+fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {
+    let defined = svrdffr();
+    assert!(svptest_first(svptrue_b64(), defined));
+    let cmp = svcmpne_u64(defined, vector, expected);
+    assert!(!svptest_any(defined, cmp))
+}
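+// The `assert_vector_matches_*` helpers above compare `vector` against `expected` only in
+// the lanes that the first-fault register (FFR) currently marks as valid: they require at
+// least the first lane to be set, then assert that no active lane differs. Each test calls
+// `svsetffr()` before its load, so for the non-temporal loads in this file every lane is
+// checked.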
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_s64(0, 1);
+    svstnt1_scatter_s64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64index_f64_with_svstnt1_scatter_u64index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let indices = svindex_u64(0, 1);
+    svstnt1_scatter_u64index_f64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64index_f64(svptrue_b64(), storage.as_ptr() as *const f64, indices);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64offset_f64_with_svstnt1_scatter_s64offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_s64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32offset_f32_with_svstnt1_scatter_u32offset_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32offset_f32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u32offset_f32(svptrue_b32(), storage.as_ptr() as *const f32, offsets);
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64offset_f64_with_svstnt1_scatter_u64offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_u64offset_f64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64offset_f64(svptrue_b64(), storage.as_ptr() as *const f64, offsets);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    svstnt1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_f64_with_svstnt1_scatter_u64base_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_f64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_f64(svptrue_b64(), bases);
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_s64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_s64(svptrue_b64(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_u64(svptrue_b64(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_u64(svptrue_b64(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_index_f32_with_svstnt1_scatter_u32base_index_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_index_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_index_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_index_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_index_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_index_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_index_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_index_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_index_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 / (4u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_index_f64_with_svstnt1_scatter_u64base_index_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_index_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_index_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_offset_f32_with_svstnt1_scatter_u32base_offset_f32() {
+    let mut storage = [0 as f32; 320usize];
+    let data = svcvt_f32_s32_x(
+        svptrue_b32(),
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_offset_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f32 || val == i as f32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_offset_f32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_f32(
+        loaded,
+        svcvt_f32_s32_x(
+            svptrue_b32(),
+            svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_offset_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_offset_s32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_offset_u32() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 4u32.try_into().unwrap());
+    svstnt1_scatter_u32base_offset_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u32base_offset_u32(
+        svptrue_b32(),
+        bases,
+        storage.as_ptr() as i64 + 4u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_offset_f64_with_svstnt1_scatter_u64base_offset_f64() {
+    let mut storage = [0 as f64; 160usize];
+    let data = svcvt_f64_s64_x(
+        svptrue_b64(),
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as f64 || val == i as f64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_f64(
+        loaded,
+        svcvt_f64_s64_x(
+            svptrue_b64(),
+            svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        ),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i64; 160usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i64 || val == i as i64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_offset_u64() {
+    let mut storage = [0 as u64; 160usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 8u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
+    svstnt1_scatter_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u64 || val == i as u64);
+    }
+    svsetffr();
+    let loaded = svldnt1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_s64(svptrue_b8(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_s64(svptrue_b16(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_s64(svptrue_b32(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1sb_gather_u64base_u64(svptrue_b8(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_u64(svptrue_b16(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_u64(svptrue_b32(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_s64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
+    let mut storage = [0 as u8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u8 || val == i as u8);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u32base_offset_s32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u32base_offset_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 1u32.try_into().unwrap());
+    svstnt1b_scatter_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u32base_offset_u32(
+        svptrue_b8(),
+        bases,
+        storage.as_ptr() as i64 + 1u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u32base_offset_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 + 2u32 as i64,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_s64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u64base_s64(svptrue_b8(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_s64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u64base_s64(svptrue_b16(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_s64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1uw_gather_u64base_s64(svptrue_b32(), bases);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
+    let mut storage = [0 as i8; 1280usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 1u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
+    svstnt1b_scatter_u64base_u64(svptrue_b8(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i8 || val == i as i8);
+    }
+    svsetffr();
+    let loaded = svldnt1ub_gather_u64base_u64(svptrue_b8(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_u64(svptrue_b16(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u64base_u64(svptrue_b16(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_u64(svptrue_b32(), bases, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1uw_gather_u64base_u64(svptrue_b32(), bases);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_s64(0, 1);
+    svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
+    let mut storage = [0 as u16; 640usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u16 || val == i as u16);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
+    let mut storage = [0 as u32; 320usize];
+    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let indices = svindex_u64(0, 1);
+    svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as u32 || val == i as u32);
+    }
+    svsetffr();
+    let loaded =
+        svldnt1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u32base_index_s32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_i32(
+        loaded,
+        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svindex_u32(0, 2u32.try_into().unwrap());
+    svstnt1h_scatter_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+        data,
+    );
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u32base_index_u32(
+        svptrue_b16(),
+        bases,
+        storage.as_ptr() as i64 / (2u32 as i64) + 1,
+    );
+    assert_vector_matches_u32(
+        loaded,
+        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_i64(
+        loaded,
+        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
+    let mut storage = [0 as i16; 640usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 2u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
+    svstnt1h_scatter_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i16 || val == i as i16);
+    }
+    svsetffr();
+    let loaded = svldnt1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
+#[simd_test(enable = "sve,sve2")]
+unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
+    let mut storage = [0 as i32; 320usize];
+    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let bases = svdup_n_u64(storage.as_ptr() as u64);
+    let offsets = svindex_u64(0, 4u32.try_into().unwrap());
+    let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
+    svstnt1w_scatter_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap(), data);
+    for (i, &val) in storage.iter().enumerate() {
+        assert!(val == 0 as i32 || val == i as i32);
+    }
+    svsetffr();
+    let loaded = svldnt1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
+    assert_vector_matches_u64(
+        loaded,
+        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+    );
+}
diff --git a/crates/core_arch/src/aarch64/sve/mod.rs b/crates/core_arch/src/aarch64/sve/mod.rs
new file mode 100644
index 0000000000..167351fc71
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/mod.rs
@@ -0,0 +1,33 @@
+//! SVE intrinsics
+//!
+//! ## Safety in this module
+//!
+//! Under [`target_feature_11`][] rules, several of these intrinsics are safe to call as long as
+//! the caller already declares the necessary target features. In general:
+//!
+//! - Intrinsics that access memory or handle pointers are `unsafe`.
+//!   - Most of these are memory accesses, and are treated just like any other pointer dereference.
+//!   - A few, such as the prefetch hints, are not obviously `unsafe`, but perform pointer
+//!     arithmetic (similar to [`pointer::offset`]) that might be unsafe.
+//! - Intrinsics that can produce undefined values are `unsafe`. This is limited to the explicit
+//!   `svundef_*` intrinsics. Note that these behave like [`core::mem::uninitialized`], and
+//!   might be similarly unsound, but this requires further analysis.
+//! - All other intrinsics operate in a well-defined manner, and are safe (subject to target
+//!   feature checks).
+//!   - This includes intrinsics with the "don't-care" predication strategy (with a `_x` suffix).
+//!     As in [ACLE][], the value of inactive lanes is unspecified, but Rust intrinsics always
+//!     ensure that they are initialised to _something_.
+//!
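+//! For example, under `target_feature_11` a caller that already enables the `sve` feature can
+//! call the purely computational intrinsics without an `unsafe` block, while the memory
+//! intrinsics remain `unsafe`. A minimal sketch (the function names are illustrative only, and
+//! the intrinsic signatures shown follow the ACLE forms):
+//!
+//! ```ignore
+//! #[target_feature(enable = "sve")]
+//! fn abs_diff(x: svfloat32_t, y: svfloat32_t) -> svfloat32_t {
+//!     // Illustrative sketch: pure computation, so no `unsafe` block is needed once the
+//!     // caller guarantees the `sve` target feature.
+//!     svabd_f32_x(svptrue_b32(), x, y)
+//! }
+//!
+//! #[target_feature(enable = "sve")]
+//! unsafe fn load_f32(ptr: *const f32) -> svfloat32_t {
+//!     // Memory access: `unsafe`, just like any other raw-pointer dereference.
+//!     svld1_f32(svptrue_b32(), ptr)
+//! }
+//! ```
+//!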
+//! [`target_feature_11`]: https://rust-lang.github.io/rfcs/2396-target-feature-1.1.html
+//! [`pointer::offset`]: pointer#method.offset
+//! [ACLE]: https://github.com/ARM-software/acle
+
+mod sve;
+mod sve2;
+mod types;
+
+use crate::core_arch::simd_llvm::*;
+
+pub use sve::*;
+pub use sve2::*;
+pub use types::*;
diff --git a/crates/core_arch/src/aarch64/sve/sve.rs b/crates/core_arch/src/aarch64/sve/sve.rs
new file mode 100644
index 0000000000..bd02cc50d4
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/sve.rs
@@ -0,0 +1,46098 @@
+// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+#![allow(improper_ctypes)]
+
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+use super::*;
+use crate::core_arch::arch::aarch64::*;
+
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv4f32")]
+        fn _svabd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svabd_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svabd_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svabd_f32_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svabd_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svabd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svabd_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabd.nxv2f64")]
+        fn _svabd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svabd_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svabd_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svabd_f64_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svabd_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svabd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabd))]
+pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svabd_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv16i8")]
+        fn _svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svabd_s8_m(pg, op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svabd_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svabd_s8_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svabd_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svabd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svabd_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv8i16")]
+        fn _svabd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svabd_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svabd_s16_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svabd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svabd_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv4i32")]
+        fn _svabd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svabd_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svabd_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svabd_s32_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svabd_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svabd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svabd_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabd.nxv2i64")]
+        fn _svabd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svabd_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svabd_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svabd_s64_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svabd_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svabd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sabd))]
+pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svabd_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv16i8")]
+        fn _svabd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svabd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svabd_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svabd_u8_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svabd_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svabd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svabd_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv8i16")]
+        fn _svabd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svabd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svabd_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svabd_u16_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svabd_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svabd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svabd_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv4i32")]
+        fn _svabd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svabd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svabd_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svabd_u32_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svabd_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svabd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svabd_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabd.nxv2i64")]
+        fn _svabd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svabd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svabd_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svabd_u64_m(pg, op1, op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svabd_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svabd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Absolute difference"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uabd))]
+pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svabd_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv4f32")]
+        fn _svabs_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svabs_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svabs_f32_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svabs_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fabs.nxv2f64")]
+        fn _svabs_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svabs_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svabs_f64_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fabs))]
+pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svabs_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv16i8")]
+        fn _svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svabs_s8_m(inactive, pg, op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svabs_s8_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svabs_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv8i16")]
+        fn _svabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svabs_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svabs_s16_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svabs_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv4i32")]
+        fn _svabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svabs_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svabs_s32_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svabs_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.abs.nxv2i64")]
+        fn _svabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svabs_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svabs_s64_m(op, pg, op)
+}
+#[doc = "Absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(abs))]
+pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svabs_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Absolute compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv4f32")]
+        fn _svacge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svacge_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Absolute compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svacge_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facge.nxv2f64")]
+        fn _svacge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svacge_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Absolute compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svacge_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv4f32")]
+        fn _svacgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svacgt_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Absolute compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svacgt_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.facgt.nxv2f64")]
+        fn _svacgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svacgt_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Absolute compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svacgt_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    svacge_f32(pg, op2, op1)
+}
+#[doc = "Absolute compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svacle_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    svacge_f64(pg, op2, op1)
+}
+#[doc = "Absolute compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facge))]
+pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svacle_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Absolute compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    svacgt_f32(pg, op2, op1)
+}
+#[doc = "Absolute compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svaclt_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Absolute compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svaclt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    svacgt_f64(pg, op2, op1)
+}
+#[doc = "Absolute compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(facgt))]
+pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svaclt_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv4f32")]
+        fn _svadd_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svadd_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svadd_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svadd_f32_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svadd_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svadd_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svadd_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadd.nxv2f64")]
+        fn _svadd_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svadd_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svadd_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svadd_f64_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svadd_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svadd_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadd))]
+pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svadd_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv16i8")]
+        fn _svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svadd_s8_m(pg, op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svadd_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svadd_s8_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svadd_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svadd_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv8i16")]
+        fn _svadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svadd_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svadd_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svadd_s16_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svadd_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svadd_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv4i32")]
+        fn _svadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadd_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svadd_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svadd_s32_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svadd_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svadd_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.add.nxv2i64")]
+        fn _svadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadd_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svadd_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svadd_s64_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svadd_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svadd_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svadd_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svadd_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svadd_u8_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svadd_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svadd_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svadd_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svadd_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svadd_u16_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svadd_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svadd_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svadd_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svadd_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svadd_u32_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svadd_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svadd_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svadd_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svadd_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svadd_u64_m(pg, op1, op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svadd_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(add))]
+pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svadd_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Add reduction (strictly-ordered)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadda))]
+pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv4f32")]
+        fn _svadda_f32(pg: svbool4_t, initial: f32, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svadda_f32(pg.into(), initial, op) }
+}
+#[doc = "Add reduction (strictly-ordered)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fadda))]
+pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fadda.nxv2f64")]
+        fn _svadda_f64(pg: svbool2_t, initial: f64, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svadda_f64(pg.into(), initial, op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(faddv))]
+pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv4f32")]
+        fn _svaddv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svaddv_f32(pg.into(), op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(faddv))]
+pub fn svaddv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddv.nxv2f64")]
+        fn _svaddv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svaddv_f64(pg.into(), op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uaddv))]
+pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv2i64")]
+        fn _svaddv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svaddv_s64(pg.into(), op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uaddv))]
+pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv2i64")]
+        fn _svaddv_u64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svaddv_u64(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(saddv))]
+pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv16i8")]
+        fn _svaddv_s8(pg: svbool_t, op: svint8_t) -> i64;
+    }
+    unsafe { _svaddv_s8(pg, op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(saddv))]
+pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv8i16")]
+        fn _svaddv_s16(pg: svbool8_t, op: svint16_t) -> i64;
+    }
+    unsafe { _svaddv_s16(pg.into(), op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(saddv))]
+pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddv.nxv4i32")]
+        fn _svaddv_s32(pg: svbool4_t, op: svint32_t) -> i64;
+    }
+    unsafe { _svaddv_s32(pg.into(), op) }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uaddv))]
+pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv16i8")]
+        fn _svaddv_u8(pg: svbool_t, op: svint8_t) -> i64;
+    }
+    unsafe { _svaddv_u8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uaddv))]
+pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv8i16")]
+        fn _svaddv_u16(pg: svbool8_t, op: svint16_t) -> i64;
+    }
+    unsafe { _svaddv_u16(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Add reduction"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uaddv))]
+pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddv.nxv4i32")]
+        fn _svaddv_u32(pg: svbool4_t, op: svint32_t) -> i64;
+    }
+    unsafe { _svaddv_u32(pg.into(), op.as_signed()).as_unsigned() }
+}
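+// Illustrative sketch (not part of the generated bindings): the reductions above
+// collapse the active lanes of a vector to a scalar, widening narrow integer
+// elements to 64 bits. Assumes `svptrue_b32` and `svdup_n_s32` are in scope.
+//
+//     let pg = svptrue_b32();
+//     let v = svdup_n_s32(3);
+//     let sum: i64 = svaddv_s32(pg, v);     // 3 * (number of active lanes)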
+#[doc = "Compute vector addresses for 8-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv4i32")]
+        fn _svadrb_u32base_s32offset(bases: svint32_t, offsets: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadrb_u32base_s32offset(bases.as_signed(), offsets).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 16-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv4i32")]
+        fn _svadrh_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadrh_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 32-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv4i32")]
+        fn _svadrw_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadrw_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 64-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv4i32")]
+        fn _svadrd_u32base_s32index(bases: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadrd_u32base_s32index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 8-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuint32_t {
+    unsafe { svadrb_u32base_s32offset(bases, offsets.as_signed()) }
+}
+#[doc = "Compute vector addresses for 16-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svadrh_u32base_s32index(bases, indices.as_signed()) }
+}
+#[doc = "Compute vector addresses for 32-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svadrw_u32base_s32index(bases, indices.as_signed()) }
+}
+#[doc = "Compute vector addresses for 64-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svadrd_u32base_s32index(bases, indices.as_signed()) }
+}
+#[doc = "Compute vector addresses for 8-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrb.nxv2i64")]
+        fn _svadrb_u64base_s64offset(bases: svint64_t, offsets: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadrb_u64base_s64offset(bases.as_signed(), offsets).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 16-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrh.nxv2i64")]
+        fn _svadrh_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadrh_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 32-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrw.nxv2i64")]
+        fn _svadrw_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadrw_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 64-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adrd.nxv2i64")]
+        fn _svadrd_u64base_s64index(bases: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadrd_u64base_s64index(bases.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Compute vector addresses for 8-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuint64_t {
+    unsafe { svadrb_u64base_s64offset(bases, offsets.as_signed()) }
+}
+#[doc = "Compute vector addresses for 16-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svadrh_u64base_s64index(bases, indices.as_signed()) }
+}
+#[doc = "Compute vector addresses for 32-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svadrw_u64base_s64index(bases, indices.as_signed()) }
+}
+#[doc = "Compute vector addresses for 64-bit data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(adr))]
+pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svadrd_u64base_s64index(bases, indices.as_signed()) }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.z.nxv16i1")]
+        fn _svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svand_b_z(pg, op1, op2) }
+}
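+// Illustrative sketch (not part of the generated bindings): `svand_b_z` ANDs two
+// predicates lane-wise and forces lanes to false where the governing predicate
+// `pg` is false. Assumes `svptrue_b8` and `svpfalse_b` from this module are in
+// scope.
+//
+//     let pg = svptrue_b8();
+//     let none = svpfalse_b();
+//     let p = svand_b_z(pg, pg, none);      // every lane false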
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv16i8")]
+        fn _svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svand_s8_m(pg, op1, op2) }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svand_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svand_s8_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svand_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svand_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svand_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv8i16")]
+        fn _svand_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svand_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svand_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svand_s16_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svand_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svand_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svand_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv4i32")]
+        fn _svand_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svand_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svand_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svand_s32_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svand_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svand_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svand_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.and.nxv2i64")]
+        fn _svand_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svand_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svand_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svand_s64_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svand_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svand_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svand_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svand_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svand_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svand_u8_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svand_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svand_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svand_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svand_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svand_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svand_u16_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svand_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svand_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svand_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svand_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svand_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svand_u32_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svand_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svand_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svand_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svand_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svand_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svand_u64_m(pg, op1, op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svand_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svand_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Bitwise AND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(and))]
+pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svand_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv16i8")]
+        fn _svandv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svandv_s8(pg, op) }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv8i16")]
+        fn _svandv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svandv_s16(pg.into(), op) }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv4i32")]
+        fn _svandv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svandv_s32(pg.into(), op) }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.andv.nxv2i64")]
+        fn _svandv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svandv_s64(pg.into(), op) }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { svandv_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { svandv_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { svandv_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise AND reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(andv))]
+pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { svandv_s64(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv16i8")]
+        fn _svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svasr_s8_m(pg, op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svasr_s8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svasr_s8_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svasr_s8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svasr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svasr_s8_z(pg, op1, svdup_n_u8(op2))
+}
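+// Illustrative sketch (not part of the generated bindings): the `_n` shift forms
+// take a scalar shift amount and broadcast it with `svdup_n_*`, as the wrappers
+// above show. Assumes `svptrue_b8` and `svdup_n_s8` are in scope.
+//
+//     let pg = svptrue_b8();
+//     let v = svdup_n_s8(-8);
+//     let r = svasr_n_s8_x(pg, v, 2);       // every active lane becomes -2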
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv8i16")]
+        fn _svasr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svasr_s16_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svasr_s16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svasr_s16_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svasr_s16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svasr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svasr_s16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv4i32")]
+        fn _svasr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svasr_s32_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svasr_s32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svasr_s32_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svasr_s32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svasr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svasr_s32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asr.nxv2i64")]
+        fn _svasr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svasr_s64_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svasr_s64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svasr_s64_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svasr_s64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svasr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svasr_s64_z(pg, op1, svdup_n_u64(op2))
+}
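+// A minimal illustrative sketch (hypothetical helper, not part of the generated
+// API): the _m, _x and _z suffixes above differ only in what happens to lanes
+// where the governing predicate is false: _m keeps them from `op1`, _x leaves
+// them unspecified, and _z zeroes them. `pg` and `x` are assumed to be supplied
+// by the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn asr_predication_forms_sketch(pg: svbool_t, x: svint32_t) -> svint32_t {
+    let merged = svasr_n_s32_m(pg, x, 3); // inactive lanes keep `x`
+    let zeroed = svasr_n_s32_z(pg, merged, 1); // inactive lanes become 0
+    svasr_n_s32_x(pg, zeroed, 1) // inactive lanes are unspecified
+}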
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.asr.wide.nxv16i8"
+        )]
+        fn _svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
+    }
+    unsafe { _svasr_wide_s8_m(pg, op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svasr_wide_s8_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    svasr_wide_s8_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svasr_wide_s8_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    svasr_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svasr_wide_s8_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.asr.wide.nxv8i16"
+        )]
+        fn _svasr_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
+    }
+    unsafe { _svasr_wide_s16_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svasr_wide_s16_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    svasr_wide_s16_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svasr_wide_s16_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    svasr_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svasr_wide_s16_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.asr.wide.nxv4i32"
+        )]
+        fn _svasr_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svasr_wide_s32_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svasr_wide_s32_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    svasr_wide_s32_m(pg, op1, op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svasr_wide_s32_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    svasr_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Arithmetic shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asr))]
+pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svasr_wide_s32_z(pg, op1, svdup_n_u64(op2))
+}
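+// Hypothetical sketch of the "wide" forms above: the shift amount comes from a
+// 64-bit vector (or a u64 scalar via the _n variants) rather than from a vector
+// of element-width amounts. Illustrative only; `pg` and `halfwords` are assumed
+// to be provided by the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn asr_wide_sketch(pg: svbool_t, halfwords: svint16_t) -> svint16_t {
+    // Shift every active 16-bit lane right by 2, taking the amount as a u64.
+    svasr_wide_n_s16_x(pg, halfwords, 2)
+}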
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv16i8")]
+        fn _svasrd_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svasrd_n_s8_m(pg, op1, IMM2) }
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svasrd_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svasrd_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv8i16")]
+        fn _svasrd_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svasrd_n_s16_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svasrd_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svasrd_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv4i32")]
+        fn _svasrd_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svasrd_n_s32_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svasrd_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svasrd_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    static_assert_range!(IMM2, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.asrd.nxv2i64")]
+        fn _svasrd_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svasrd_n_s64_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svasrd_n_s64_m::<IMM2>(pg, op1)
+}
+#[doc = "Arithmetic shift right for divide by immediate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(asrd, IMM2 = 1))]
+pub fn svasrd_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svasrd_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
+}
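+// Hypothetical sketch of the svasrd family above: the shift amount is a const
+// generic checked by static_assert_range!, and the instruction behaves like a
+// signed division by 2^IMM2 that rounds towards zero on the active lanes.
+// Illustrative only; `pg` and `x` are assumed to come from the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn asrd_divide_by_four_sketch(pg: svbool_t, x: svint32_t) -> svint32_t {
+    // Divide active lanes by 4 (IMM2 = 2); inactive lanes are zeroed by _z.
+    svasrd_n_s32_z::<2>(pg, x)
+}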
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.z.nxv16i1")]
+        fn _svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbic_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv16i8")]
+        fn _svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbic_s8_m(pg, op1, op2) }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svbic_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svbic_s8_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svbic_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svbic_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svbic_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv8i16")]
+        fn _svbic_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbic_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svbic_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svbic_s16_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svbic_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svbic_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svbic_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv4i32")]
+        fn _svbic_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbic_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svbic_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svbic_s32_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svbic_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svbic_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svbic_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bic.nxv2i64")]
+        fn _svbic_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbic_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svbic_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svbic_s64_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svbic_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svbic_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svbic_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svbic_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbic_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svbic_u8_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbic_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svbic_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbic_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svbic_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbic_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svbic_u16_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbic_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svbic_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbic_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svbic_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbic_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svbic_u32_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbic_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svbic_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbic_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svbic_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbic_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svbic_u64_m(pg, op1, op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbic_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svbic_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Bitwise clear"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(bic))]
+pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbic_u64_z(pg, op1, svdup_n_u64(op2))
+}
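+// Hypothetical sketch of the svbic family above: bitwise clear computes
+// `op1 & !op2` on the active lanes. Illustrative only; `pg` and `bytes` are
+// assumed to be supplied by the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn clear_low_nibble_sketch(pg: svbool_t, bytes: svuint8_t) -> svuint8_t {
+    // Clear the low four bits of every active byte; inactive lanes keep `bytes`.
+    svbic_n_u8_m(pg, bytes, 0x0f)
+}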
+#[doc = "Break after first true condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brka))]
+pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.nxv16i1")]
+        fn _svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrka_b_m(inactive, pg, op) }
+}
+#[doc = "Break after first true condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brka))]
+pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brka.z.nxv16i1")]
+        fn _svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrka_b_z(pg, op) }
+}
+#[doc = "Break before first true condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brkb))]
+pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.nxv16i1")]
+        fn _svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkb_b_m(inactive, pg, op) }
+}
+#[doc = "Break before first true condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brkb))]
+pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkb.z.nxv16i1")]
+        fn _svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkb_b_z(pg, op) }
+}
+#[doc = "Propagate break to next partition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brkn))]
+pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.brkn.z.nxv16i1")]
+        fn _svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkn_b_z(pg, op1, op2) }
+}
+#[doc = "Break after first true condition, propagating from previous partition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brkpa))]
+pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpa.z.nxv16i1"
+        )]
+        fn _svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpa_b_z(pg, op1, op2) }
+}
+#[doc = "Break before first true condition, propagating from previous partition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(brkpb))]
+pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.brkpb.z.nxv16i1"
+        )]
+        fn _svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svbrkpb_b_z(pg, op1, op2) }
+}
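+// Hypothetical sketch of the break intrinsics above: svbrkb_b_z yields a
+// predicate that is true only for the active lanes strictly before the first
+// true lane of `matches`, a common way to partition a loop at the first match.
+// Illustrative only; inputs are assumed to come from the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn lanes_before_first_match_sketch(pg: svbool_t, matches: svbool_t) -> svbool_t {
+    svbrkb_b_z(pg, matches)
+}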
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv4f32")]
+        fn _svcadd_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcadd_f32_m(pg.into(), op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+) -> svfloat32_t {
+    svcadd_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcadd.nxv2f64")]
+        fn _svcadd_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcadd_f64_m(pg.into(), op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, op1, op2)
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))]
+pub fn svcadd_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+) -> svfloat64_t {
+    svcadd_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
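+// Hypothetical sketch of svcadd above: lanes are treated as interleaved
+// (real, imaginary) pairs and IMM_ROTATION selects whether op2 is rotated by
+// 90 or 270 degrees (multiplied by +i or -i) before the addition. Illustrative
+// only; `pg`, `acc` and `z` are assumed to come from the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn complex_add_rotate_90_sketch(pg: svbool_t, acc: svfloat32_t, z: svfloat32_t) -> svfloat32_t {
+    // acc + i*z on the active complex pairs; inactive lanes are unspecified (_x).
+    svcadd_f32_x::<90>(pg, acc, z)
+}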
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4f32")]
+        fn _svclasta_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svclasta_f32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2f64")]
+        fn _svclasta_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svclasta_f64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv16i8")]
+        fn _svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
+    }
+    unsafe { _svclasta_s8(pg, fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv8i16")]
+        fn _svclasta_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
+    }
+    unsafe { _svclasta_s16(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv4i32")]
+        fn _svclasta_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
+    }
+    unsafe { _svclasta_s32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clasta.nxv2i64")]
+        fn _svclasta_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
+    }
+    unsafe { _svclasta_s64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
+    unsafe { svclasta_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
+    unsafe { svclasta_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
+    unsafe { svclasta_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
+    unsafe { svclasta_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv4f32"
+        )]
+        fn _svclasta_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
+    }
+    unsafe { _svclasta_n_f32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv2f64"
+        )]
+        fn _svclasta_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
+    }
+    unsafe { _svclasta_n_f64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv16i8"
+        )]
+        fn _svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
+    }
+    unsafe { _svclasta_n_s8(pg, fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv8i16"
+        )]
+        fn _svclasta_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
+    }
+    unsafe { _svclasta_n_s16(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv4i32"
+        )]
+        fn _svclasta_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
+    }
+    unsafe { _svclasta_n_s32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clasta.n.nxv2i64"
+        )]
+        fn _svclasta_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
+    }
+    unsafe { _svclasta_n_s64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
+    unsafe { svclasta_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
+    unsafe { svclasta_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
+    unsafe { svclasta_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clasta))]
+pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
+    unsafe { svclasta_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
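+// Hypothetical sketch of the svclasta family above: the scalar _n form returns
+// the element of `data` after the last active lane, or `fallback` when no lane
+// of `pg` is active, which lets a loop carry a value out of its final partial
+// iteration. Illustrative only; inputs are assumed to come from the caller.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn carry_out_after_last_sketch(pg: svbool_t, previous: f32, data: svfloat32_t) -> f32 {
+    svclasta_n_f32(pg, previous, data)
+}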
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4f32")]
+        fn _svclastb_f32(pg: svbool4_t, fallback: svfloat32_t, data: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svclastb_f32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2f64")]
+        fn _svclastb_f64(pg: svbool2_t, fallback: svfloat64_t, data: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svclastb_f64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv16i8")]
+        fn _svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t;
+    }
+    unsafe { _svclastb_s8(pg, fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv8i16")]
+        fn _svclastb_s16(pg: svbool8_t, fallback: svint16_t, data: svint16_t) -> svint16_t;
+    }
+    unsafe { _svclastb_s16(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv4i32")]
+        fn _svclastb_s32(pg: svbool4_t, fallback: svint32_t, data: svint32_t) -> svint32_t;
+    }
+    unsafe { _svclastb_s32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clastb.nxv2i64")]
+        fn _svclastb_s64(pg: svbool2_t, fallback: svint64_t, data: svint64_t) -> svint64_t;
+    }
+    unsafe { _svclastb_s64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint8_t {
+    unsafe { svclastb_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svuint16_t {
+    unsafe { svclastb_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svuint32_t {
+    unsafe { svclastb_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svuint64_t {
+    unsafe { svclastb_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv4f32"
+        )]
+        fn _svclastb_n_f32(pg: svbool4_t, fallback: f32, data: svfloat32_t) -> f32;
+    }
+    unsafe { _svclastb_n_f32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv2f64"
+        )]
+        fn _svclastb_n_f64(pg: svbool2_t, fallback: f64, data: svfloat64_t) -> f64;
+    }
+    unsafe { _svclastb_n_f64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv16i8"
+        )]
+        fn _svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8;
+    }
+    unsafe { _svclastb_n_s8(pg, fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv8i16"
+        )]
+        fn _svclastb_n_s16(pg: svbool8_t, fallback: i16, data: svint16_t) -> i16;
+    }
+    unsafe { _svclastb_n_s16(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv4i32"
+        )]
+        fn _svclastb_n_s32(pg: svbool4_t, fallback: i32, data: svint32_t) -> i32;
+    }
+    unsafe { _svclastb_n_s32(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.clastb.n.nxv2i64"
+        )]
+        fn _svclastb_n_s64(pg: svbool2_t, fallback: i64, data: svint64_t) -> i64;
+    }
+    unsafe { _svclastb_n_s64(pg.into(), fallback, data) }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 {
+    unsafe { svclastb_n_s8(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 {
+    unsafe { svclastb_n_s16(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 {
+    unsafe { svclastb_n_s32(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
+#[doc = "Conditionally extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clastb))]
+pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 {
+    unsafe { svclastb_n_s64(pg, fallback.as_signed(), data.as_signed()).as_unsigned() }
+}
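+// A hedged usage sketch for the `svclastb` family above: `svclastb_*` returns
+// the last active element of `data` under the predicate `pg`, falling back to
+// `fallback` when no lane is active. It assumes an all-true predicate helper
+// such as `svptrue_b8()` exists elsewhere in the crate (it is not part of this
+// hunk):
+//
+//     let data = svdup_n_s8(7);
+//     // With an all-true predicate the last lane (7) is returned; with an
+//     // all-false predicate the scalar fallback (0) is returned instead.
+//     let last: i8 = svclastb_n_s8(svptrue_b8(), 0, data);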
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv16i8")]
+        fn _svcls_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svcls_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    unsafe { svcls_s8_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    svcls_s8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv8i16")]
+        fn _svcls_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svcls_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    unsafe { svcls_s16_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    svcls_s16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv4i32")]
+        fn _svcls_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svcls_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    unsafe { svcls_s32_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    svcls_s32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cls.nxv2i64")]
+        fn _svcls_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svcls_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    unsafe { svcls_s64_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading sign bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cls))]
+pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    svcls_s64_m(svdup_n_u64(0), pg, op)
+}
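+// The `_m`/`_x`/`_z` suffixes above are the standard SVE predication forms:
+// `_m` merges inactive lanes from `inactive`, `_x` leaves inactive lanes
+// unspecified (implemented here by reusing `op` as the merge source), and
+// `_z` zeroes them (implemented by merging into `svdup_n_u*(0)`). A minimal
+// sketch, assuming a predicate `pg` and inputs built elsewhere:
+//
+//     let zeroed = svcls_s32_z(pg, x);        // inactive lanes become 0
+//     let merged = svcls_s32_m(prev, pg, x);  // inactive lanes keep `prev`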
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv16i8")]
+        fn _svclz_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svclz_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    unsafe { svclz_s8_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    svclz_s8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv8i16")]
+        fn _svclz_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svclz_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    unsafe { svclz_s16_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    svclz_s16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv4i32")]
+        fn _svclz_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svclz_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    unsafe { svclz_s32_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    svclz_s32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.clz.nxv2i64")]
+        fn _svclz_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svclz_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    unsafe { svclz_s64_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    svclz_s64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    unsafe { svclz_s8_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svclz_u8_m(op, pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svclz_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svclz_s16_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svclz_u16_m(op, pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svclz_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svclz_s32_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svclz_u32_m(op, pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svclz_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svclz_s64_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svclz_u64_m(op, pg, op)
+}
+#[doc = "Count leading zero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(clz))]
+pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svclz_u64_m(svdup_n_u64(0), pg, op)
+}
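+// The unsigned `svclz_u*` forms above simply reinterpret their input as the
+// matching signed vector and defer to the signed wrappers, since the LLVM
+// `llvm.aarch64.sve.clz.*` intrinsics are declared on signed element types.
+// A hedged sketch, assuming `pg` is built elsewhere:
+//
+//     let x = svdup_n_u32(1);
+//     let lz = svclz_u32_x(pg, x); // every active lane holds 31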
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv4f32")]
+        fn _svcmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_f32_m(pg.into(), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f32_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svcmla_f32_m::<IMM_ROTATION>(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_m<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmla.nxv2f64")]
+        fn _svcmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            imm_rotation: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svcmla_f64_m(pg.into(), op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_x<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, op1, op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))]
+pub fn svcmla_f64_z<const IMM_ROTATION: i32>(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svcmla_f64_m::<IMM_ROTATION>(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_f32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fcmla.lane.x.nxv4f32"
+        )]
+        fn _svcmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svcmla_lane_f32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
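+// For the `svcmla_*` intrinsics the rotation (and, for the `_lane` form, the
+// lane index) is a const generic validated at compile time by `static_assert!`
+// against the values the FCMLA instruction accepts (0, 90, 180 or 270). A
+// minimal sketch, assuming `pg`, `acc`, `a` and `b` are built elsewhere:
+//
+//     // acc + a * b with a 90-degree rotation, merging inactive lanes from acc
+//     let r = svcmla_f32_m::<90>(pg, acc, a, b);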
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv4f32")]
+        fn _svcmpeq_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpeq_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpeq_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpeq.nxv2f64")]
+        fn _svcmpeq_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpeq_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmeq))]
+pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmpeq_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv16i8")]
+        fn _svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpeq_s8(pg, op1, op2) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmpeq_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv8i16")]
+        fn _svcmpeq_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpeq_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmpeq_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv4i32")]
+        fn _svcmpeq_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpeq_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmpeq_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpeq.nxv2i64")]
+        fn _svcmpeq_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpeq_s64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmpeq_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    unsafe { svcmpeq_s8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmpeq_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    unsafe { svcmpeq_s16(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmpeq_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    unsafe { svcmpeq_s32(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmpeq_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    unsafe { svcmpeq_s64(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmpeq_u64(pg, op1, svdup_n_u64(op2))
+}
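+// The `_n_` comparison forms above take a scalar second operand, broadcast it
+// with the corresponding `svdup_n_*`, and call the vector comparison, yielding
+// a predicate with one lane per element. A hedged sketch, assuming `pg` and
+// `x` are built elsewhere:
+//
+//     let eq_to_three: svbool_t = svcmpeq_n_s32(pg, x, 3);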
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv16i8"
+        )]
+        fn _svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpeq_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv8i16"
+        )]
+        fn _svcmpeq_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpeq_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpeq.wide.nxv4i32"
+        )]
+        fn _svcmpeq_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpeq_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpeq))]
+pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmpeq_wide_s32(pg, op1, svdup_n_s64(op2))
+}
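+// The `_wide` comparisons above compare 8-, 16- or 32-bit elements of `op1`
+// against the 64-bit element of `op2` occupying the same doubleword, which is
+// why the `_n_` wrappers broadcast an `i64` scalar with `svdup_n_s64`. A
+// minimal sketch, assuming `pg` and `op1` are built elsewhere:
+//
+//     // Compare every i8 lane of `op1` against the 64-bit scalar 5.
+//     let mask = svcmpeq_wide_n_s8(pg, op1, 5);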
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv4f32")]
+        fn _svcmpge_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpge_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpge.nxv2f64")]
+        fn _svcmpge_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpge_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmpge_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv16i8")]
+        fn _svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpge_s8(pg, op1, op2) }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmpge_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv8i16")]
+        fn _svcmpge_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpge_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmpge_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv4i32")]
+        fn _svcmpge_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmpge_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpge.nxv2i64")]
+        fn _svcmpge_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpge_s64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmpge_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv16i8")]
+        fn _svcmpge_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpge_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmpge_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv8i16")]
+        fn _svcmpge_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpge_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmpge_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv4i32")]
+        fn _svcmpge_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmpge_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphs.nxv2i64")]
+        fn _svcmpge_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpge_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmpge_u64(pg, op1, svdup_n_u64(op2))
+}
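+// Note that the unsigned greater-than-or-equal comparisons lower to the CMPHS
+// ("higher or same") instruction, as the `assert_instr(cmphs)` attributes
+// above reflect, while the signed forms use CMPGE. A hedged sketch, assuming
+// `pg` and `x` are built elsewhere:
+//
+//     let ge_ten: svbool_t = svcmpge_n_u32(pg, x, 10);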
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpge.wide.nxv16i8"
+        )]
+        fn _svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpge_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmpge_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpge.wide.nxv8i16"
+        )]
+        fn _svcmpge_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpge_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmpge_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpge.wide.nxv4i32"
+        )]
+        fn _svcmpge_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmpge_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphs.wide.nxv16i8"
+        )]
+        fn _svcmpge_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpge_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
+    svcmpge_wide_u8(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphs.wide.nxv8i16"
+        )]
+        fn _svcmpge_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpge_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
+    svcmpge_wide_u16(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphs.wide.nxv4i32"
+        )]
+        fn _svcmpge_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpge_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
+    svcmpge_wide_u32(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv4f32")]
+        fn _svcmpgt_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpgt_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpgt_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpgt.nxv2f64")]
+        fn _svcmpgt_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpgt_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmpgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmpgt_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv16i8")]
+        fn _svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpgt_s8(pg, op1, op2) }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmpgt_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv8i16")]
+        fn _svcmpgt_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpgt_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmpgt_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv4i32")]
+        fn _svcmpgt_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpgt_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmpgt_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpgt.nxv2i64")]
+        fn _svcmpgt_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpgt_s64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmpgt_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv16i8")]
+        fn _svcmpgt_u8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpgt_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmpgt_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv8i16")]
+        fn _svcmpgt_u16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpgt_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmpgt_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv4i32")]
+        fn _svcmpgt_u32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpgt_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmpgt_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmphi.nxv2i64")]
+        fn _svcmpgt_u64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpgt_u64(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmpgt_u64(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv16i8"
+        )]
+        fn _svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpgt_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmpgt_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv8i16"
+        )]
+        fn _svcmpgt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpgt_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmpgt_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpgt.wide.nxv4i32"
+        )]
+        fn _svcmpgt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpgt_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmpgt_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphi.wide.nxv16i8"
+        )]
+        fn _svcmpgt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpgt_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
+    svcmpgt_wide_u8(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphi.wide.nxv8i16"
+        )]
+        fn _svcmpgt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpgt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
+    svcmpgt_wide_u16(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmphi.wide.nxv4i32"
+        )]
+        fn _svcmpgt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpgt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
+    svcmpgt_wide_u32(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    svcmpge_f32(pg, op2, op1)
+}
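+// `svcmple_*` (and `svcmplt_*` below) are expressed as the corresponding
+// greater-than(-or-equal) compare with the operands swapped, which is why the
+// instruction assertions still expect `fcmge`/`cmpge`/`cmphs` and friends.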
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmple_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    svcmpge_f64(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmge))]
+pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmple_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    svcmpge_s8(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmple_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    svcmpge_s16(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmple_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    svcmpge_s32(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmple_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    svcmpge_s64(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpge))]
+pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmple_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    svcmpge_u8(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmple_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    svcmpge_u16(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmple_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    svcmpge_u32(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmple_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    svcmpge_u64(pg, op2, op1)
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphs))]
+pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmple_u64(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmple.wide.nxv16i8"
+        )]
+        fn _svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmple_wide_s8(pg, op1, op2) }
+}
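+// Unlike the element-width `svcmple_*` above, the widened compares cannot be
+// rewritten as an operand swap (the two operands have different element widths),
+// so they bind the dedicated `cmple.wide`/`cmpls.wide` LLVM intrinsics directly.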
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmple_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmple.wide.nxv8i16"
+        )]
+        fn _svcmple_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmple_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmple_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmple.wide.nxv4i32"
+        )]
+        fn _svcmple_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmple_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmple))]
+pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmple_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpls.wide.nxv16i8"
+        )]
+        fn _svcmple_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmple_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
+    svcmple_wide_u8(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpls.wide.nxv8i16"
+        )]
+        fn _svcmple_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmple_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
+    svcmple_wide_u16(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpls.wide.nxv4i32"
+        )]
+        fn _svcmple_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmple_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpls))]
+pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
+    svcmple_wide_u32(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    svcmpgt_f32(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmplt_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    svcmpgt_f64(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmgt))]
+pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmplt_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    svcmpgt_s8(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmplt_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    svcmpgt_s16(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmplt_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    svcmpgt_s32(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmplt_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    svcmpgt_s64(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpgt))]
+pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmplt_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    svcmpgt_u8(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmplt_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    svcmpgt_u16(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmplt_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    svcmpgt_u32(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmplt_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    svcmpgt_u64(pg, op2, op1)
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmphi))]
+pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmplt_u64(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplt.wide.nxv16i8"
+        )]
+        fn _svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmplt_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmplt_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplt.wide.nxv8i16"
+        )]
+        fn _svcmplt_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmplt_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmplt_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplt.wide.nxv4i32"
+        )]
+        fn _svcmplt_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmplt_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplt))]
+pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmplt_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplo.wide.nxv16i8"
+        )]
+        fn _svcmplt_wide_u8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmplt_wide_u8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t {
+    svcmplt_wide_u8(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplo.wide.nxv8i16"
+        )]
+        fn _svcmplt_wide_u16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmplt_wide_u16(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t {
+    svcmplt_wide_u16(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmplo.wide.nxv4i32"
+        )]
+        fn _svcmplt_wide_u32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmplt_wide_u32(pg.into(), op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "Compare less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmplo))]
+pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t {
+    svcmplt_wide_u32(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmne))]
+pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv4f32")]
+        fn _svcmpne_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpne_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmne))]
+pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpne_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmne))]
+pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpne.nxv2f64")]
+        fn _svcmpne_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpne_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmne))]
+pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmpne_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv16i8")]
+        fn _svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svcmpne_s8(pg, op1, op2) }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t {
+    svcmpne_s8(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv8i16")]
+        fn _svcmpne_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svcmpne_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t {
+    svcmpne_s16(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv4i32")]
+        fn _svcmpne_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpne_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t {
+    svcmpne_s32(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmpne.nxv2i64")]
+        fn _svcmpne_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpne_s64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t {
+    svcmpne_s64(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    unsafe { svcmpne_s8(pg, op1.as_signed(), op2.as_signed()) }
+}
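+// Integer `cmpne` is sign-agnostic, so the unsigned variants simply reinterpret
+// their lanes with `as_signed()` and forward to the signed implementation.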
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t {
+    svcmpne_u8(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    unsafe { svcmpne_s16(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t {
+    svcmpne_u16(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t {
+    unsafe { svcmpne_s32(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t {
+    svcmpne_u32(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t {
+    unsafe { svcmpne_s64(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t {
+    svcmpne_u64(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpne.wide.nxv16i8"
+        )]
+        fn _svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t;
+    }
+    unsafe { _svcmpne_wide_s8(pg, op1, op2) }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t {
+    svcmpne_wide_s8(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpne.wide.nxv8i16"
+        )]
+        fn _svcmpne_wide_s16(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svbool8_t;
+    }
+    unsafe { _svcmpne_wide_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t {
+    svcmpne_wide_s16(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmpne.wide.nxv4i32"
+        )]
+        fn _svcmpne_wide_s32(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svbool4_t;
+    }
+    unsafe { _svcmpne_wide_s32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare not equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cmpne))]
+pub fn svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t {
+    svcmpne_wide_s32(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Compare unordered with"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmuo))]
+pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv4f32")]
+        fn _svcmpuo_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool4_t;
+    }
+    unsafe { _svcmpuo_f32(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare unordered with"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmuo))]
+pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t {
+    svcmpuo_f32(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Compare unordered with"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmuo))]
+pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcmpuo.nxv2f64")]
+        fn _svcmpuo_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool2_t;
+    }
+    unsafe { _svcmpuo_f64(pg.into(), op1, op2).into() }
+}
+#[doc = "Compare unordered with"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcmuo))]
+pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t {
+    svcmpuo_f64(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv16i8")]
+        fn _svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svcnot_s8_m(inactive, pg, op) }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svcnot_s8_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svcnot_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv8i16")]
+        fn _svcnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svcnot_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svcnot_s16_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svcnot_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv4i32")]
+        fn _svcnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svcnot_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svcnot_s32_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svcnot_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnot.nxv2i64")]
+        fn _svcnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svcnot_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svcnot_s64_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svcnot_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    unsafe { svcnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svcnot_u8_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svcnot_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svcnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svcnot_u16_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svcnot_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svcnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svcnot_u32_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svcnot_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svcnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnot_u64_m(op, pg, op)
+}
+#[doc = "Logically invert boolean condition"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnot))]
+pub fn svcnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnot_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4f32")]
+        fn _svcnt_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
+    }
+    unsafe { _svcnt_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    unsafe { svcnt_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    svcnt_f32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2f64")]
+        fn _svcnt_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
+    }
+    unsafe { _svcnt_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    unsafe { svcnt_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    svcnt_f64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv16i8")]
+        fn _svcnt_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svcnt_s8_m(inactive.as_signed(), pg, op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    unsafe { svcnt_s8_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t {
+    svcnt_s8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv8i16")]
+        fn _svcnt_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svcnt_s16_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    unsafe { svcnt_s16_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t {
+    svcnt_s16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv4i32")]
+        fn _svcnt_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svcnt_s32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    unsafe { svcnt_s32_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t {
+    svcnt_s32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnt.nxv2i64")]
+        fn _svcnt_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svcnt_s64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    unsafe { svcnt_s64_m(op.as_unsigned(), pg, op) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t {
+    svcnt_s64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    unsafe { svcnt_s8_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svcnt_u8_m(op, pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svcnt_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svcnt_s16_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svcnt_u16_m(op, pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svcnt_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svcnt_s32_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svcnt_u32_m(op, pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svcnt_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svcnt_s64_m(inactive, pg, op.as_signed()) }
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnt_u64_m(op, pg, op)
+}
+#[doc = "Count nonzero bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnt))]
+pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svcnt_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Count the number of 8-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl))]
+pub fn svcntb() -> u64 {
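+    // SV_ALL counts every 8-bit element, i.e. the vector length in bytes, hence the
+    // `rdvl` in the `assert_instr` above.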
+    svcntb_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 16-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth))]
+pub fn svcnth() -> u64 {
+    svcnth_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 32-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw))]
+pub fn svcntw() -> u64 {
+    svcntw_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 64-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd))]
+pub fn svcntd() -> u64 {
+    svcntd_pat::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Count the number of 8-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl, PATTERN = { svpattern::SV_ALL }))]
+#[cfg_attr(test, assert_instr(cntb, PATTERN = { svpattern::SV_MUL4 }))]
+pub fn svcntb_pat<const PATTERN: svpattern>() -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntb")]
+        fn _svcntb_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntb_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 16-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcnth_pat<const PATTERN: svpattern>() -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cnth")]
+        fn _svcnth_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcnth_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 32-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcntw_pat<const PATTERN: svpattern>() -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntw")]
+        fn _svcntw_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntw_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count the number of 64-bit elements in a vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd, PATTERN = { svpattern::SV_ALL }))]
+pub fn svcntd_pat<const PATTERN: svpattern>() -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntd")]
+        fn _svcntd_pat(pattern: svpattern) -> i64;
+    }
+    unsafe { _svcntd_pat(PATTERN).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv16i1")]
+        fn _svcntp_b8(pg: svbool_t, op: svbool_t) -> i64;
+    }
+    unsafe { _svcntp_b8(pg, op).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv8i1")]
+        fn _svcntp_b16(pg: svbool8_t, op: svbool8_t) -> i64;
+    }
+    unsafe { _svcntp_b16(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv4i1")]
+        fn _svcntp_b32(pg: svbool4_t, op: svbool4_t) -> i64;
+    }
+    unsafe { _svcntp_b32(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Count set predicate bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntp))]
+pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cntp.nxv2i1")]
+        fn _svcntp_b64(pg: svbool2_t, op: svbool2_t) -> i64;
+    }
+    unsafe { _svcntp_b64(pg.into(), op.into()).as_unsigned() }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.compact.nxv4f32"
+        )]
+        fn _svcompact_f32(pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svcompact_f32(pg.into(), op) }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_f64(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.compact.nxv2f64"
+        )]
+        fn _svcompact_f64(pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svcompact_f64(pg.into(), op) }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.compact.nxv4i32"
+        )]
+        fn _svcompact_s32(pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svcompact_s32(pg.into(), op) }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.compact.nxv2i64"
+        )]
+        fn _svcompact_s64(pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svcompact_s64(pg.into(), op) }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svcompact_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Shuffle active elements of vector to the right and fill with zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(compact))]
+pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svcompact_s64(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv8f32.nxv4f32"
+        )]
+        fn _svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t;
+    }
+    unsafe { _svcreate2_f32(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv4f64.nxv2f64"
+        )]
+        fn _svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t;
+    }
+    unsafe { _svcreate2_f64(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv32i8.nxv16i8"
+        )]
+        fn _svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t;
+    }
+    unsafe { _svcreate2_s8(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv16i16.nxv8i16"
+        )]
+        fn _svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t;
+    }
+    unsafe { _svcreate2_s16(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv8i32.nxv4i32"
+        )]
+        fn _svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t;
+    }
+    unsafe { _svcreate2_s32(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create2.nxv4i64.nxv2i64"
+        )]
+        fn _svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t;
+    }
+    unsafe { _svcreate2_s64(x0, x1) }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t {
+    unsafe { svcreate2_s8(x0.as_signed(), x1.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t {
+    unsafe { svcreate2_s16(x0.as_signed(), x1.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t {
+    unsafe { svcreate2_s32(x0.as_signed(), x1.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t {
+    unsafe { svcreate2_s64(x0.as_signed(), x1.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv12f32.nxv4f32"
+        )]
+        fn _svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t;
+    }
+    unsafe { _svcreate3_f32(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv6f64.nxv2f64"
+        )]
+        fn _svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t;
+    }
+    unsafe { _svcreate3_f64(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv48i8.nxv16i8"
+        )]
+        fn _svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t;
+    }
+    unsafe { _svcreate3_s8(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv24i16.nxv8i16"
+        )]
+        fn _svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t;
+    }
+    unsafe { _svcreate3_s16(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv12i32.nxv4i32"
+        )]
+        fn _svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t;
+    }
+    unsafe { _svcreate3_s32(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create3.nxv6i64.nxv2i64"
+        )]
+        fn _svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t;
+    }
+    unsafe { _svcreate3_s64(x0, x1, x2) }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t {
+    unsafe { svcreate3_s8(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t {
+    unsafe { svcreate3_s16(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t {
+    unsafe { svcreate3_s32(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t {
+    unsafe { svcreate3_s64(x0.as_signed(), x1.as_signed(), x2.as_signed()).as_unsigned() }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_f32(
+    x0: svfloat32_t,
+    x1: svfloat32_t,
+    x2: svfloat32_t,
+    x3: svfloat32_t,
+) -> svfloat32x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv16f32.nxv4f32"
+        )]
+        fn _svcreate4_f32(
+            x0: svfloat32_t,
+            x1: svfloat32_t,
+            x2: svfloat32_t,
+            x3: svfloat32_t,
+        ) -> svfloat32x4_t;
+    }
+    unsafe { _svcreate4_f32(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_f64(
+    x0: svfloat64_t,
+    x1: svfloat64_t,
+    x2: svfloat64_t,
+    x3: svfloat64_t,
+) -> svfloat64x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv8f64.nxv2f64"
+        )]
+        fn _svcreate4_f64(
+            x0: svfloat64_t,
+            x1: svfloat64_t,
+            x2: svfloat64_t,
+            x3: svfloat64_t,
+        ) -> svfloat64x4_t;
+    }
+    unsafe { _svcreate4_f64(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv64i8.nxv16i8"
+        )]
+        fn _svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t;
+    }
+    unsafe { _svcreate4_s8(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv32i16.nxv8i16"
+        )]
+        fn _svcreate4_s16(
+            x0: svint16_t,
+            x1: svint16_t,
+            x2: svint16_t,
+            x3: svint16_t,
+        ) -> svint16x4_t;
+    }
+    unsafe { _svcreate4_s16(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv16i32.nxv4i32"
+        )]
+        fn _svcreate4_s32(
+            x0: svint32_t,
+            x1: svint32_t,
+            x2: svint32_t,
+            x3: svint32_t,
+        ) -> svint32x4_t;
+    }
+    unsafe { _svcreate4_s32(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.create4.nxv8i64.nxv2i64"
+        )]
+        fn _svcreate4_s64(
+            x0: svint64_t,
+            x1: svint64_t,
+            x2: svint64_t,
+            x3: svint64_t,
+        ) -> svint64x4_t;
+    }
+    unsafe { _svcreate4_s64(x0, x1, x2, x3) }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t {
+    unsafe {
+        svcreate4_s8(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_u16(
+    x0: svuint16_t,
+    x1: svuint16_t,
+    x2: svuint16_t,
+    x3: svuint16_t,
+) -> svuint16x4_t {
+    unsafe {
+        svcreate4_s16(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_u32(
+    x0: svuint32_t,
+    x1: svuint32_t,
+    x2: svuint32_t,
+    x3: svuint32_t,
+) -> svuint32x4_t {
+    unsafe {
+        svcreate4_s32(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Create a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svcreate4_u64(
+    x0: svuint64_t,
+    x1: svuint64_t,
+    x2: svuint64_t,
+    x3: svuint64_t,
+) -> svuint64x4_t {
+    unsafe {
+        svcreate4_s64(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f32f64")]
+        fn _svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvt_f32_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
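+    // The _x form leaves inactive lanes unspecified, so a bitwise reinterpretation of
+    // `op` serves as the merge input instead of materialising a separate value.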
+    unsafe { svcvt_f32_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    svcvt_f32_f64_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvt.f64f32")]
+        fn _svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t) -> svfloat64_t;
+    }
+    unsafe { _svcvt_f64_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
+    unsafe { svcvt_f64_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvt))]
+pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
+    svcvt_f64_f32_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.scvtf.nxv4f32.nxv4i32"
+        )]
+        fn _svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svcvt_f32_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t {
+    unsafe { svcvt_f32_s32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t {
+    svcvt_f32_s32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.scvtf.f32i64")]
+        fn _svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvt_f32_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t {
+    unsafe { svcvt_f32_s64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t {
+    svcvt_f32_s64_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ucvtf.nxv4f32.nxv4i32"
+        )]
+        fn _svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool4_t, op: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svcvt_f32_u32_m(inactive, pg.into(), op.as_signed()) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t {
+    unsafe { svcvt_f32_u32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat32_t {
+    svcvt_f32_u32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ucvtf.f32i64")]
+        fn _svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool2_t, op: svint64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvt_f32_u64_m(inactive, pg.into(), op.as_signed()) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t {
+    unsafe { svcvt_f32_u64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t {
+    svcvt_f32_u64_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv4i32"
+        )]
+        fn _svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t;
+    }
+    unsafe { _svcvt_f64_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t {
+    unsafe { svcvt_f64_s32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t {
+    svcvt_f64_s32_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.scvtf.nxv2f64.nxv2i64"
+        )]
+        fn _svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svcvt_f64_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t {
+    unsafe { svcvt_f64_s64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(scvtf))]
+pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t {
+    svcvt_f64_s64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv4i32"
+        )]
+        fn _svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool2_t, op: svint32_t) -> svfloat64_t;
+    }
+    unsafe { _svcvt_f64_u32_m(inactive, pg.into(), op.as_signed()) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t {
+    unsafe { svcvt_f64_u32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t {
+    svcvt_f64_u32_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ucvtf.nxv2f64.nxv2i64"
+        )]
+        fn _svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool2_t, op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svcvt_f64_u64_m(inactive, pg.into(), op.as_signed()) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t {
+    unsafe { svcvt_f64_u64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ucvtf))]
+pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t {
+    svcvt_f64_u64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f32")]
+        fn _svcvt_s32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
+    }
+    unsafe { _svcvt_s32_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    unsafe { svcvt_s32_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    svcvt_s32_f32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f64_m(inactive: svint32_t, pg: svbool_t, op: svfloat64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i32f64")]
+        fn _svcvt_s32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t;
+    }
+    unsafe { _svcvt_s32_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t {
+    unsafe { svcvt_s32_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t {
+    svcvt_s32_f64_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f32")]
+        fn _svcvt_s64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t;
+    }
+    unsafe { _svcvt_s64_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t {
+    unsafe { svcvt_s64_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t {
+    svcvt_s64_f32_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzs.i64f64")]
+        fn _svcvt_s64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
+    }
+    unsafe { _svcvt_s64_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    unsafe { svcvt_s64_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzs))]
+pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    svcvt_s64_f64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f32")]
+        fn _svcvt_u32_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
+    }
+    unsafe { _svcvt_u32_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    unsafe { svcvt_u32_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t {
+    svcvt_u32_f32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i32f64")]
+        fn _svcvt_u32_f64_m(inactive: svint32_t, pg: svbool2_t, op: svfloat64_t) -> svint32_t;
+    }
+    unsafe { _svcvt_u32_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t {
+    unsafe { svcvt_u32_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t {
+    svcvt_u32_f64_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f32")]
+        fn _svcvt_u64_f32_m(inactive: svint64_t, pg: svbool2_t, op: svfloat32_t) -> svint64_t;
+    }
+    unsafe { _svcvt_u64_f32_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
+    unsafe { svcvt_u64_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t {
+    svcvt_u64_f32_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtzu.i64f64")]
+        fn _svcvt_u64_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
+    }
+    unsafe { _svcvt_u64_f64_m(inactive.as_signed(), pg.into(), op).as_unsigned() }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    unsafe { svcvt_u64_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Floating-point convert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fcvtzu))]
+pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t {
+    svcvt_u64_f64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv4f32")]
+        fn _svdiv_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svdiv_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdiv_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svdiv_f32_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdiv_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svdiv_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
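+// For binary operations the merging form takes inactive lanes from `op1`,
+// so the `_z` form first zeroes the inactive lanes of `op1` with `svsel`
+// and then reuses `_m`, leaving zeros in the inactive lanes of the result.
+// The `_n` forms splat the scalar operand with `svdup_n_*` and defer to the
+// vector forms.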
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdiv_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdiv.nxv2f64")]
+        fn _svdiv_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svdiv_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdiv_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svdiv_f64_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdiv_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svdiv_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdiv))]
+pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdiv_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv4i32")]
+        fn _svdiv_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svdiv_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdiv_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdiv_s32_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdiv_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdiv_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdiv_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdiv.nxv2i64")]
+        fn _svdiv_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svdiv_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdiv_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svdiv_s64_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdiv_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svdiv_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdiv))]
+pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdiv_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv4i32")]
+        fn _svdiv_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svdiv_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdiv_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svdiv_u32_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdiv_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svdiv_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdiv_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udiv.nxv2i64")]
+        fn _svdiv_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svdiv_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdiv_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdiv_u64_m(pg, op1, op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdiv_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdiv_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Divide"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udiv))]
+pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdiv_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv4f32")]
+        fn _svdivr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svdivr_f32_m(pg.into(), op1, op2) }
+}
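+// The reversed forms (FDIVR/SDIVR/UDIVR) divide the second source by the
+// first, i.e. they compute `op2 / op1` for the active lanes; predication
+// otherwise follows the same `_m`/`_x`/`_z` scheme as `svdiv`.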
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdivr_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svdivr_f32_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdivr_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svdivr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svdivr_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fdivr.nxv2f64")]
+        fn _svdivr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svdivr_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdivr_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svdivr_f64_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdivr_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svdivr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fdivr))]
+pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svdivr_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv4i32")]
+        fn _svdivr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svdivr_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdivr_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdivr_s32_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdivr_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svdivr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svdivr_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdivr.nxv2i64")]
+        fn _svdivr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svdivr_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdivr_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svdivr_s64_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdivr_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svdivr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdivr))]
+pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svdivr_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv4i32")]
+        fn _svdivr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svdivr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdivr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svdivr_u32_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdivr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svdivr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svdivr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udivr.nxv2i64")]
+        fn _svdivr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svdivr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdivr_u64_m(pg, op1, op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svdivr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Divide reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udivr))]
+pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svdivr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svdot_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
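+// The lane index is a const generic validated at compile time by
+// `static_assert_range!`: the 32-bit dot product consumes groups of four
+// 8-bit elements, so the index ranges over 0..=3, while the 64-bit form
+// indexes pairs of 16-bit groups and is limited to 0..=1.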
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))]
+pub fn svdot_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sdot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_s64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svdot_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint8_t,
+    op3: svuint8_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv4i32"
+        )]
+        fn _svdot_lane_u32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svdot_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))]
+pub fn svdot_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.udot.lane.nxv2i64"
+        )]
+        fn _svdot_lane_u64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svdot_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX).as_unsigned()
+    }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv4i32")]
+        fn _svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svdot_s32(op1, op2, op3) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t {
+    svdot_s32(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sdot.nxv2i64")]
+        fn _svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t;
+    }
+    unsafe { _svdot_s64(op1, op2, op3) }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sdot))]
+pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t {
+    svdot_s64(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv4i32")]
+        fn _svdot_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svdot_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t {
+    svdot_u32(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.udot.nxv2i64")]
+        fn _svdot_u64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t;
+    }
+    unsafe { _svdot_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(udot))]
+pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t {
+    svdot_u64(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t {
+    svtbl_f32(data, svdup_n_u32(index))
+}
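+// `svdup_lane` broadcasts an existing vector element rather than a scalar,
+// so it is implemented as a table lookup (`svtbl`) with a splatted index,
+// hence the `tbl` instruction assertion instead of `dup`.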
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t {
+    svtbl_f64(data, svdup_n_u64(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t {
+    svtbl_s8(data, svdup_n_u8(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t {
+    svtbl_s16(data, svdup_n_u16(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t {
+    svtbl_s32(data, svdup_n_u32(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t {
+    svtbl_s64(data, svdup_n_u64(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t {
+    svtbl_u8(data, svdup_n_u8(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t {
+    svtbl_u16(data, svdup_n_u16(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t {
+    svtbl_u32(data, svdup_n_u32(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t {
+    svtbl_u64(data, svdup_n_u64(index))
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sbfx))]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svdup_n_b8(op: bool) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i1")]
+        fn _svdup_n_b8(op: bool) -> svbool_t;
+    }
+    unsafe { _svdup_n_b8(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sbfx))]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svdup_n_b16(op: bool) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i1")]
+        fn _svdup_n_b16(op: bool) -> svbool8_t;
+    }
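+    // The LLVM intrinsic operates on the element-width predicate (nxv8i1); convert back to the generic svbool_t.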
+    unsafe { _svdup_n_b16(op).into() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sbfx))]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svdup_n_b32(op: bool) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i1")]
+        fn _svdup_n_b32(op: bool) -> svbool4_t;
+    }
+    unsafe { _svdup_n_b32(op).into() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sbfx))]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svdup_n_b64(op: bool) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i1")]
+        fn _svdup_n_b64(op: bool) -> svbool2_t;
+    }
+    unsafe { _svdup_n_b64(op).into() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f32(op: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4f32")]
+        fn _svdup_n_f32(op: f32) -> svfloat32_t;
+    }
+    unsafe { _svdup_n_f32(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f64(op: f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2f64")]
+        fn _svdup_n_f64(op: f64) -> svfloat64_t;
+    }
+    unsafe { _svdup_n_f64(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s8(op: i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv16i8")]
+        fn _svdup_n_s8(op: i8) -> svint8_t;
+    }
+    unsafe { _svdup_n_s8(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s16(op: i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv8i16")]
+        fn _svdup_n_s16(op: i16) -> svint16_t;
+    }
+    unsafe { _svdup_n_s16(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s32(op: i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv4i32")]
+        fn _svdup_n_s32(op: i32) -> svint32_t;
+    }
+    unsafe { _svdup_n_s32(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s64(op: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.x.nxv2i64")]
+        fn _svdup_n_s64(op: i64) -> svint64_t;
+    }
+    unsafe { _svdup_n_s64(op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u8(op: u8) -> svuint8_t {
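+    // The unsigned form reinterprets to the signed intrinsic and back; the bit pattern is unchanged.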
+    unsafe { svdup_n_s8(op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u16(op: u16) -> svuint16_t {
+    unsafe { svdup_n_s16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u32(op: u32) -> svuint32_t {
+    unsafe { svdup_n_s32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u64(op: u64) -> svuint64_t {
+    unsafe { svdup_n_s64(op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4f32")]
+        fn _svdup_n_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: f32) -> svfloat32_t;
+    }
+    unsafe { _svdup_n_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t {
+    svdup_n_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t {
+    svdup_n_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f64_m(inactive: svfloat64_t, pg: svbool_t, op: f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2f64")]
+        fn _svdup_n_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: f64) -> svfloat64_t;
+    }
+    unsafe { _svdup_n_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t {
+    svdup_n_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t {
+    svdup_n_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv16i8")]
+        fn _svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t;
+    }
+    unsafe { _svdup_n_s8_m(inactive, pg, op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t {
+    svdup_n_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t {
+    svdup_n_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv8i16")]
+        fn _svdup_n_s16_m(inactive: svint16_t, pg: svbool8_t, op: i16) -> svint16_t;
+    }
+    unsafe { _svdup_n_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t {
+    svdup_n_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t {
+    svdup_n_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv4i32")]
+        fn _svdup_n_s32_m(inactive: svint32_t, pg: svbool4_t, op: i32) -> svint32_t;
+    }
+    unsafe { _svdup_n_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t {
+    svdup_n_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t {
+    svdup_n_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.dup.nxv2i64")]
+        fn _svdup_n_s64_m(inactive: svint64_t, pg: svbool2_t, op: i64) -> svint64_t;
+    }
+    unsafe { _svdup_n_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t {
+    svdup_n_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t {
+    svdup_n_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t {
+    unsafe { svdup_n_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t {
+    svdup_n_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t {
+    svdup_n_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t {
+    unsafe { svdup_n_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t {
+    svdup_n_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t {
+    svdup_n_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t {
+    unsafe { svdup_n_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t {
+    svdup_n_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t {
+    svdup_n_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t {
+    unsafe { svdup_n_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t {
+    svdup_n_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Broadcast a scalar value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t {
+    svdup_n_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv4f32"
+        )]
+        fn _svdupq_lane_f32(data: svfloat32_t, index: i64) -> svfloat32_t;
+    }
+    unsafe { _svdupq_lane_f32(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv2f64"
+        )]
+        fn _svdupq_lane_f64(data: svfloat64_t, index: i64) -> svfloat64_t;
+    }
+    unsafe { _svdupq_lane_f64(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv16i8"
+        )]
+        fn _svdupq_lane_s8(data: svint8_t, index: i64) -> svint8_t;
+    }
+    unsafe { _svdupq_lane_s8(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv8i16"
+        )]
+        fn _svdupq_lane_s16(data: svint16_t, index: i64) -> svint16_t;
+    }
+    unsafe { _svdupq_lane_s16(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv4i32"
+        )]
+        fn _svdupq_lane_s32(data: svint32_t, index: i64) -> svint32_t;
+    }
+    unsafe { _svdupq_lane_s32(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.dupq.lane.nxv2i64"
+        )]
+        fn _svdupq_lane_s64(data: svint64_t, index: i64) -> svint64_t;
+    }
+    unsafe { _svdupq_lane_s64(data, index.as_signed()) }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t {
+    unsafe { svdupq_lane_s8(data.as_signed(), index).as_unsigned() }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t {
+    unsafe { svdupq_lane_s16(data.as_signed(), index).as_unsigned() }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t {
+    unsafe { svdupq_lane_s32(data.as_signed(), index).as_unsigned() }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t {
+    unsafe { svdupq_lane_s64(data.as_signed(), index).as_unsigned() }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_b16(
+    x0: bool,
+    x1: bool,
+    x2: bool,
+    x3: bool,
+    x4: bool,
+    x5: bool,
+    x6: bool,
+    x7: bool,
+) -> svbool_t {
+    let op1 = svdupq_n_s16(
+        x0 as i16, x1 as i16, x2 as i16, x3 as i16, x4 as i16, x5 as i16, x6 as i16, x7 as i16,
+    );
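+    // Materialise the predicate: lanes holding 1 compare not-equal to zero and become active.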
+    svcmpne_wide_s16(svptrue_b16(), op1, svdup_n_s64(0))
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t {
+    let op1 = svdupq_n_s32(x0 as i32, x1 as i32, x2 as i32, x3 as i32);
+    svcmpne_wide_s32(svptrue_b32(), op1, svdup_n_s64(0))
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t {
+    let op1 = svdupq_n_s64(x0 as i64, x1 as i64);
+    svcmpne_s64(svptrue_b64(), op1, svdup_n_s64(0))
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_b8(
+    x0: bool,
+    x1: bool,
+    x2: bool,
+    x3: bool,
+    x4: bool,
+    x5: bool,
+    x6: bool,
+    x7: bool,
+    x8: bool,
+    x9: bool,
+    x10: bool,
+    x11: bool,
+    x12: bool,
+    x13: bool,
+    x14: bool,
+    x15: bool,
+) -> svbool_t {
+    let op1 = svdupq_n_s8(
+        x0 as i8, x1 as i8, x2 as i8, x3 as i8, x4 as i8, x5 as i8, x6 as i8, x7 as i8, x8 as i8,
+        x9 as i8, x10 as i8, x11 as i8, x12 as i8, x13 as i8, x14 as i8, x15 as i8,
+    );
+    svcmpne_wide_s8(svptrue_b8(), op1, svdup_n_s64(0))
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32"
+        )]
+        fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t;
+    }
+    unsafe {
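+        // Insert the four scalars as a fixed 128-bit vector into element 0 of a scalable vector,
+        // then broadcast that quadword to every 128-bit segment.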
+        let op = _svdupq_n_f32(
+            simd_reinterpret(()),
+            crate::mem::transmute([x0, x1, x2, x3]),
+            0,
+        );
+        svdupq_lane_f32(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32"
+        )]
+        fn _svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t;
+    }
+    unsafe {
+        let op = _svdupq_n_s32(
+            simd_reinterpret(()),
+            crate::mem::transmute([x0, x1, x2, x3]),
+            0,
+        );
+        svdupq_lane_s32(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t {
+    unsafe {
+        svdupq_n_s32(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64"
+        )]
+        fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t;
+    }
+    unsafe {
+        let op = _svdupq_n_f64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0);
+        svdupq_lane_f64(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64"
+        )]
+        fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t;
+    }
+    unsafe {
+        let op = _svdupq_n_s64(simd_reinterpret(()), crate::mem::transmute([x0, x1]), 0);
+        svdupq_lane_s64(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t {
+    unsafe { svdupq_n_s64(x0.as_signed(), x1.as_signed()).as_unsigned() }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_s16(
+    x0: i16,
+    x1: i16,
+    x2: i16,
+    x3: i16,
+    x4: i16,
+    x5: i16,
+    x6: i16,
+    x7: i16,
+) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16"
+        )]
+        fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t;
+    }
+    unsafe {
+        let op = _svdupq_n_s16(
+            simd_reinterpret(()),
+            crate::mem::transmute([x0, x1, x2, x3, x4, x5, x6, x7]),
+            0,
+        );
+        svdupq_lane_s16(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_u16(
+    x0: u16,
+    x1: u16,
+    x2: u16,
+    x3: u16,
+    x4: u16,
+    x5: u16,
+    x6: u16,
+    x7: u16,
+) -> svuint16_t {
+    unsafe {
+        svdupq_n_s16(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+            x4.as_signed(),
+            x5.as_signed(),
+            x6.as_signed(),
+            x7.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_s8(
+    x0: i8,
+    x1: i8,
+    x2: i8,
+    x3: i8,
+    x4: i8,
+    x5: i8,
+    x6: i8,
+    x7: i8,
+    x8: i8,
+    x9: i8,
+    x10: i8,
+    x11: i8,
+    x12: i8,
+    x13: i8,
+    x14: i8,
+    x15: i8,
+) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8"
+        )]
+        fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t;
+    }
+    unsafe {
+        let op = _svdupq_n_s8(
+            simd_reinterpret(()),
+            crate::mem::transmute([
+                x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15,
+            ]),
+            0,
+        );
+        svdupq_lane_s8(op, 0)
+    }
+}
+#[doc = "Broadcast a quadword of scalars"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svdupq_n_u8(
+    x0: u8,
+    x1: u8,
+    x2: u8,
+    x3: u8,
+    x4: u8,
+    x5: u8,
+    x6: u8,
+    x7: u8,
+    x8: u8,
+    x9: u8,
+    x10: u8,
+    x11: u8,
+    x12: u8,
+    x13: u8,
+    x14: u8,
+    x15: u8,
+) -> svuint8_t {
+    unsafe {
+        svdupq_n_s8(
+            x0.as_signed(),
+            x1.as_signed(),
+            x2.as_signed(),
+            x3.as_signed(),
+            x4.as_signed(),
+            x5.as_signed(),
+            x6.as_signed(),
+            x7.as_signed(),
+            x8.as_signed(),
+            x9.as_signed(),
+            x10.as_signed(),
+            x11.as_signed(),
+            x12.as_signed(),
+            x13.as_signed(),
+            x14.as_signed(),
+            x15.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.z.nxv16i1")]
+        fn _sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _sveor_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv16i8")]
+        fn _sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _sveor_s8_m(pg, op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    sveor_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
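+    // Inactive lanes are unspecified for the _x form, so delegating to the merging form is valid.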
+    sveor_s8_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    sveor_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
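+    // Zeroing form: clear the inactive lanes of op1 first, then apply the merging form.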
+    sveor_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    sveor_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv8i16")]
+        fn _sveor_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _sveor_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    sveor_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    sveor_s16_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    sveor_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    sveor_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    sveor_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv4i32")]
+        fn _sveor_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _sveor_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    sveor_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    sveor_s32_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    sveor_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    sveor_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    sveor_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor.nxv2i64")]
+        fn _sveor_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _sveor_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    sveor_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    sveor_s64_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    sveor_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    sveor_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    sveor_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { sveor_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    sveor_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    sveor_u8_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    sveor_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    sveor_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    sveor_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { sveor_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    sveor_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    sveor_u16_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    sveor_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    sveor_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    sveor_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { sveor_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    sveor_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    sveor_u32_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    sveor_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    sveor_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    sveor_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { sveor_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    sveor_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    sveor_u64_m(pg, op1, op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    sveor_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    sveor_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Bitwise exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eor))]
+pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    sveor_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv16i8")]
+        fn _sveorv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _sveorv_s8(pg, op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv8i16")]
+        fn _sveorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _sveorv_s16(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv4i32")]
+        fn _sveorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _sveorv_s32(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorv.nxv2i64")]
+        fn _sveorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _sveorv_s64(pg.into(), op) }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { sveorv_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { sveorv_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { sveorv_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(eorv))]
+pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { sveorv_s64(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Floating-point exponential accelerator"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv4f32 "
+        )]
+        fn _svexpa_f32(op: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svexpa_f32(op.as_signed()) }
+}
+#[doc = "Floating-point exponential accelerator"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fexpa))]
+pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fexpa.x.nxv2f64 "
+        )]
+        fn _svexpa_f64(op: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svexpa_f64(op.as_signed()) }
+}
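+// Note (sketch, not generated code): `svexpa_*` exposes the FEXPA table
+// lookup, a building block for vectorised exp() that maps the low bits of
+// each unsigned element to a coarse floating-point exponential estimate.
+// An illustrative call, reusing `svindex_u32` defined later in this file:
+//
+//     let estimates = svexpa_f32(svindex_u32(0, 1)); // per-lane table lookup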
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0, 63);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4f32")]
+        fn _svext_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svext_f32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2f64")]
+        fn _svext_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svext_f64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv16i8")]
+        fn _svext_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svext_s8(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv8i16")]
+        fn _svext_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svext_s16(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv4i32")]
+        fn _svext_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svext_s32(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ext.nxv2i64")]
+        fn _svext_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svext_s64(op1, op2, IMM3) }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0, 255);
+    unsafe { svext_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0, 127);
+    unsafe { svext_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe { svext_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Extract vector from pair of vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ext, IMM3 = 1))]
+pub fn svext_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe { svext_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
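+// Usage sketch (illustrative only): `svext_*` concatenates `op1` and `op2`
+// and returns the window starting `IMM3` elements into `op1`, so it acts as
+// an element-wise shift across a vector pair. The const generic must satisfy
+// the per-element-size range asserted above (e.g. 0..=63 for 32-bit lanes):
+//
+//     let lo = svindex_s32(0, 1);
+//     let hi = svindex_s32(100, 1);
+//     let shifted = svext_s32::<2>(lo, hi); // lanes 2.. of `lo`, then the first lanes of `hi`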
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv8i16")]
+        fn _svextb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svextb_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svextb_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv4i32")]
+        fn _svextb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svextb_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svextb_s32_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svextb_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv4i32")]
+        fn _svexth_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svexth_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svexth_s32_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svexth_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtb.nxv2i64")]
+        fn _svextb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svextb_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svextb_s64_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtb))]
+pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svextb_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxth.nxv2i64")]
+        fn _svexth_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svexth_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svexth_s64_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxth))]
+pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svexth_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Sign-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtw))]
+pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sxtw.nxv2i64")]
+        fn _svextw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svextw_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Sign-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtw))]
+pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svextw_s64_m(op, pg, op)
+}
+#[doc = "Sign-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sxtw))]
+pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svextw_s64_m(svdup_n_s64(0), pg, op)
+}
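+// Predication note (sketch, not generated code): each extension intrinsic has
+// `_m`, `_x` and `_z` forms. `_m` takes inactive lanes from `inactive`, `_x`
+// leaves them unspecified (implemented above by reusing `op`), and `_z`
+// zeroes them, which is why `_z` is written as `_m` over `svdup_n_*(0)`.
+// With a hypothetical predicate `pg`:
+//
+//     let merged = svextb_s32_m(svdup_n_s32(-1), pg, svindex_s32(0, 1));
+//     let zeroed = svextb_s32_z(pg, svindex_s32(0, 1));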
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv8i16")]
+        fn _svextb_u16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svextb_u16_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svextb_u16_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svextb_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv4i32")]
+        fn _svextb_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svextb_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svextb_u32_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svextb_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv4i32")]
+        fn _svexth_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svexth_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svexth_u32_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svexth_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtb.nxv2i64")]
+        fn _svextb_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svextb_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svextb_u64_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 8 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtb))]
+pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svextb_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxth.nxv2i64")]
+        fn _svexth_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svexth_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svexth_u64_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 16 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxth))]
+pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svexth_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Zero-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtw))]
+pub fn svextw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uxtw.nxv2i64")]
+        fn _svextw_u64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svextw_u64_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Zero-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtw))]
+pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svextw_u64_m(op, pg, op)
+}
+#[doc = "Zero-extend the low 32 bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uxtw))]
+pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svextw_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv8f32"
+        )]
+        fn _svget2_f32(tuple: svfloat32x2_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svget2_f32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv4f64"
+        )]
+        fn _svget2_f64(tuple: svfloat64x2_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svget2_f64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t) -> svint8_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv32i8"
+        )]
+        fn _svget2_s8(tuple: svint8x2_t, imm_index: i32) -> svint8_t;
+    }
+    unsafe { _svget2_s8(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv16i16"
+        )]
+        fn _svget2_s16(tuple: svint16x2_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svget2_s16(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv8i32"
+        )]
+        fn _svget2_s32(tuple: svint32x2_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svget2_s32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv4i64"
+        )]
+        fn _svget2_s64(tuple: svint64x2_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget2_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svget2_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
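+// Usage sketch (illustrative only): `svget2_*` extracts one vector from a
+// two-vector tuple by const index, and `svget3_*`/`svget4_*` below do the
+// same for three- and four-vector tuples. Assuming a tuple obtained from a
+// structure load or constructor such as `svld2_f32`/`svcreate2_f32`
+// (hypothetical names here):
+//
+//     let first: svfloat32_t = svget2_f32::<0>(tuple);
+//     let second: svfloat32_t = svget2_f32::<1>(tuple);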
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv12f32"
+        )]
+        fn _svget3_f32(tuple: svfloat32x3_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svget3_f32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv6f64"
+        )]
+        fn _svget3_f64(tuple: svfloat64x3_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svget3_f64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t) -> svint8_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv48i8"
+        )]
+        fn _svget3_s8(tuple: svint8x3_t, imm_index: i32) -> svint8_t;
+    }
+    unsafe { _svget3_s8(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv24i16"
+        )]
+        fn _svget3_s16(tuple: svint16x3_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svget3_s16(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv12i32"
+        )]
+        fn _svget3_s32(tuple: svint32x3_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svget3_s32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv6i64"
+        )]
+        fn _svget3_s64(tuple: svint64x3_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget3_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svget3_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4f32.nxv16f32"
+        )]
+        fn _svget4_f32(tuple: svfloat32x4_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svget4_f32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2f64.nxv8f64"
+        )]
+        fn _svget4_f64(tuple: svfloat64x4_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svget4_f64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t) -> svint8_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv16i8.nxv64i8"
+        )]
+        fn _svget4_s8(tuple: svint8x4_t, imm_index: i32) -> svint8_t;
+    }
+    unsafe { _svget4_s8(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv8i16.nxv32i16"
+        )]
+        fn _svget4_s16(tuple: svint16x4_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svget4_s16(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv4i32.nxv16i32"
+        )]
+        fn _svget4_s32(tuple: svint32x4_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svget4_s32(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.get.nxv2i64.nxv8i64"
+        )]
+        fn _svget4_s64(tuple: svint64x4_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svget4_s64(tuple, IMM_INDEX) }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t) -> svuint8_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s8::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s16::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s32::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Extract one vector from a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svget4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svget4_s64::<IMM_INDEX>(tuple.as_signed()).as_unsigned() }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s8(base: i8, step: i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv16i8")]
+        fn _svindex_s8(base: i8, step: i8) -> svint8_t;
+    }
+    unsafe { _svindex_s8(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s16(base: i16, step: i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv8i16")]
+        fn _svindex_s16(base: i16, step: i16) -> svint16_t;
+    }
+    unsafe { _svindex_s16(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s32(base: i32, step: i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv4i32")]
+        fn _svindex_s32(base: i32, step: i32) -> svint32_t;
+    }
+    unsafe { _svindex_s32(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_s64(base: i64, step: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.index.nxv2i64")]
+        fn _svindex_s64(base: i64, step: i64) -> svint64_t;
+    }
+    unsafe { _svindex_s64(base, step) }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_u8(base: u8, step: u8) -> svuint8_t {
+    unsafe { svindex_s8(base.as_signed(), step.as_signed()).as_unsigned() }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_u16(base: u16, step: u16) -> svuint16_t {
+    unsafe { svindex_s16(base.as_signed(), step.as_signed()).as_unsigned() }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_u32(base: u32, step: u32) -> svuint32_t {
+    unsafe { svindex_s32(base.as_signed(), step.as_signed()).as_unsigned() }
+}
+#[doc = "Create linear series"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(index))]
+pub fn svindex_u64(base: u64, step: u64) -> svuint64_t {
+    unsafe { svindex_s64(base.as_signed(), step.as_signed()).as_unsigned() }
+}
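+// Usage sketch: `svindex_*` materialises the arithmetic sequence
+// base, base + step, base + 2*step, ... across the lanes of a vector,
+// with ordinary wrapping arithmetic in the element type:
+//
+//     let evens = svindex_u32(0, 2);      // 0, 2, 4, 6, ...
+//     let countdown = svindex_s8(10, -1); // 10, 9, 8, ...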
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4f32")]
+        fn _svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t;
+    }
+    unsafe { _svinsr_n_f32(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2f64")]
+        fn _svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t;
+    }
+    unsafe { _svinsr_n_f64(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv16i8")]
+        fn _svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t;
+    }
+    unsafe { _svinsr_n_s8(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv8i16")]
+        fn _svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t;
+    }
+    unsafe { _svinsr_n_s16(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv4i32")]
+        fn _svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t;
+    }
+    unsafe { _svinsr_n_s32(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.insr.nxv2i64")]
+        fn _svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t;
+    }
+    unsafe { _svinsr_n_s64(op1, op2) }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    unsafe { svinsr_n_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    unsafe { svinsr_n_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    unsafe { svinsr_n_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Insert scalar in shifted vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(insr))]
+pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    unsafe { svinsr_n_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
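+// Usage sketch: `svinsr_*` shifts every element of the vector up by one lane
+// (dropping the highest) and inserts the scalar into lane 0, so repeated
+// calls build a vector from scalars:
+//
+//     let v = svinsr_n_s32(svdup_n_s32(0), 7); // lane 0 == 7, remaining lanes 0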
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4f32")]
+        fn _svlasta_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svlasta_f32(pg.into(), op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2f64")]
+        fn _svlasta_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svlasta_f64(pg.into(), op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv16i8")]
+        fn _svlasta_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svlasta_s8(pg, op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv8i16")]
+        fn _svlasta_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svlasta_s16(pg.into(), op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv4i32")]
+        fn _svlasta_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svlasta_s32(pg.into(), op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lasta.nxv2i64")]
+        fn _svlasta_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svlasta_s64(pg.into(), op) }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { svlasta_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { svlasta_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { svlasta_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract element after last"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lasta))]
+pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { svlasta_s64(pg, op.as_signed()).as_unsigned() }
+}
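+// Usage sketch (illustrative only): `svlasta_*` returns the element one past
+// the last active lane of `pg` (wrapping to lane 0 at the end of the vector),
+// whereas the `svlastb_*` family below returns the last active element
+// itself. With a hypothetical predicate `first_three` covering lanes 0..=2:
+//
+//     let next: i32 = svlasta_s32(first_three, svindex_s32(0, 1)); // == 3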
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4f32")]
+        fn _svlastb_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svlastb_f32(pg.into(), op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2f64")]
+        fn _svlastb_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svlastb_f64(pg.into(), op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv16i8")]
+        fn _svlastb_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svlastb_s8(pg, op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv8i16")]
+        fn _svlastb_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svlastb_s16(pg.into(), op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv4i32")]
+        fn _svlastb_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svlastb_s32(pg.into(), op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lastb.nxv2i64")]
+        fn _svlastb_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svlastb_s64(pg.into(), op) }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { svlastb_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { svlastb_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { svlastb_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Extract last element"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lastb))]
+pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { svlastb_s64(pg, op.as_signed()).as_unsigned() }
+}
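+// A minimal usage sketch for the `svlastb_*` wrappers above (illustrative only, not part of
+// the generated bindings): it assumes an all-true predicate constructor such as `svptrue_b8`
+// is defined elsewhere in this module and that the caller is compiled with the `sve` feature.
+//
+//     unsafe {
+//         let pg = svptrue_b8();                  // all lanes active (assumed helper)
+//         let v = svld1_s8(pg, data.as_ptr());    // `data: &[i8]`, at least one vector long
+//         let last: i8 = svlastb_s8(pg, v);       // scalar value of the last active lane
+//     }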
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4f32")]
+        fn _svld1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svld1_f32(pg.into(), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2f64")]
+        fn _svld1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svld1_f64(pg.into(), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv16i8")]
+        fn _svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svld1_s8(pg, base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i16")]
+        fn _svld1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svld1_s16(pg.into(), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i32")]
+        fn _svld1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svld1_s32(pg.into(), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i64")]
+        fn _svld1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svld1_s64(pg.into(), base)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svld1_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svld1_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svld1_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svld1_s64(pg, base.as_signed()).as_unsigned()
+}
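+// The unsigned `svld1_u*` wrappers above reuse the signed LLVM bindings by casting the pointer
+// and result with `as_signed`/`as_unsigned`, while the governing predicate is narrowed from
+// `svbool_t` to the lane-width-specific `svboolN_t` via `.into()`. A hedged sketch of a
+// whole-vector load (again assuming an `svptrue_b32` helper exists in this module):
+//
+//     unsafe {
+//         let pg = svptrue_b32();                           // one active lane per f32 element
+//         let v: svfloat32_t = svld1_f32(pg, buf.as_ptr()); // `buf: &[f32]`, one vector long
+//     }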
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32index_f32(
+    pg: svbool_t,
+    base: *const f32,
+    indices: svint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32"
+        )]
+        fn _svld1_gather_s32index_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            indices: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svld1_gather_s32index_f32(pg.into(), base, indices)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svld1_gather_s32index_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            indices: svint32_t,
+        ) -> svint32_t;
+    }
+    _svld1_gather_s32index_s32(pg.into(), base, indices)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint32_t,
+) -> svuint32_t {
+    svld1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2f64"
+        )]
+        fn _svld1_gather_s64index_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            indices: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svld1_gather_s64index_f64(pg.into(), base, indices)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i64"
+        )]
+        fn _svld1_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            indices: svint64_t,
+        ) -> svint64_t;
+    }
+    _svld1_gather_s64index_s64(pg.into(), base, indices)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svint64_t,
+) -> svuint64_t {
+    svld1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32index_f32(
+    pg: svbool_t,
+    base: *const f32,
+    indices: svuint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32"
+        )]
+        fn _svld1_gather_u32index_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            indices: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svld1_gather_u32index_f32(pg.into(), base, indices.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svld1_gather_u32index_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            indices: svint32_t,
+        ) -> svint32_t;
+    }
+    _svld1_gather_u32index_s32(pg.into(), base, indices.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint32_t,
+) -> svuint32_t {
+    svld1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svuint64_t,
+) -> svfloat64_t {
+    svld1_gather_s64index_f64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svuint64_t,
+) -> svint64_t {
+    svld1_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svld1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
+}
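+// The `index` gathers scale each per-lane index by the element size before adding it to `base`;
+// the 32-bit forms map to the `sxtw`/`uxtw` LLVM gathers (indices sign- or zero-extended to
+// 64 bits), while the `u64index` wrappers simply reuse the `s64index` bindings. A hedged
+// table-lookup sketch (assumed `svptrue_b32` helper; `indices: &[i32]`, `table: &[f32]`):
+//
+//     unsafe {
+//         let pg = svptrue_b32();
+//         let idx = svld1_s32(pg, indices.as_ptr());
+//         let vals = svld1_gather_s32index_f32(pg, table.as_ptr(), idx); // table[idx[i]] per lane
+//     }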
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32"
+        )]
+        fn _svld1_gather_s32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svld1_gather_s32offset_f32(pg.into(), base, offsets)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32"
+        )]
+        fn _svld1_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svld1_gather_s32offset_s32(pg.into(), base, offsets)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svld1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2f64"
+        )]
+        fn _svld1_gather_s64offset_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            offsets: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svld1_gather_s64offset_f64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i64"
+        )]
+        fn _svld1_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            offsets: svint64_t,
+        ) -> svint64_t;
+    }
+    _svld1_gather_s64offset_s64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svld1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svuint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32"
+        )]
+        fn _svld1_gather_u32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svld1_gather_u32offset_f32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32"
+        )]
+        fn _svld1_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svld1_gather_u32offset_s32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svld1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svuint64_t,
+) -> svfloat64_t {
+    svld1_gather_s64offset_f64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
+}
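+// Unlike the `index` gathers, the `offset` gathers above interpret each lane of `offsets` as a
+// raw byte offset from `base`; no scaling by the element size is applied. Sketch under the same
+// assumptions as above (`byte_offs: &[i32]` holding multiples of 4 for aligned `f32` accesses):
+//
+//     unsafe {
+//         let pg = svptrue_b32();
+//         let offs = svld1_s32(pg, byte_offs.as_ptr());
+//         let vals = svld1_gather_s32offset_f32(pg, table.as_ptr(), offs);
+//     }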
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
+    svld1_gather_u32base_offset_f32(pg, bases, 0)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svld1_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svld1_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
+    svld1_gather_u64base_offset_f64(pg, bases, 0)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1_gather_u64base_offset_u64(pg, bases, 0)
+}
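+// The plain `[_u32base]`/`[_u64base]` gathers above are zero-offset wrappers: each lane of
+// `bases` already holds a complete address, so they forward directly to the `_offset_` form:
+//
+//     svld1_gather_u64base_f64(pg, bases) == svld1_gather_u64base_offset_f64(pg, bases, 0)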
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_index_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svfloat32_t {
+    svld1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svld1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svld1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_index_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svfloat64_t {
+    svld1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svld1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svld1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_offset_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4f32.nxv4i32"
+        )]
+        fn _svld1_gather_u32base_offset_f32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svfloat32_t;
+    }
+    _svld1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i32.nxv4i32"
+        )]
+        fn _svld1_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svint32_t;
+    }
+    _svld1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svld1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_offset_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2f64.nxv2i64"
+        )]
+        fn _svld1_gather_u64base_offset_f64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svfloat64_t;
+    }
+    _svld1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i64.nxv2i64"
+        )]
+        fn _svld1_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svint64_t;
+    }
+    _svld1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svld1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
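+// The `[_u32base]_index`/`[_u64base]_index` variants earlier in this family are likewise thin
+// wrappers over these `_offset_` forms: the element index is turned into a byte offset with
+// `unchecked_shl` (by 2 for 32-bit and by 3 for 64-bit elements), i.e.:
+//
+//     svld1_gather_u64base_index_f64(pg, bases, i)
+//         == svld1_gather_u64base_offset_f64(pg, bases, i << 3)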
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
+    svld1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
+    svld1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
+    svld1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
+    svld1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
+    svld1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
+    svld1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
+    svld1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
+    svld1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
+    svld1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1d))]
+pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
+    svld1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
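+// A minimal usage sketch for the `vnum` forms, assuming `data` points to at
+// least `2 * svcntd()` readable elements: `vnum` counts whole vectors, so
+// `vnum = 1` starts one full vector past `data`. The helper name
+// `load_two_blocks` is illustrative only.
+//
+//     #[target_feature(enable = "sve")]
+//     unsafe fn load_two_blocks(pg: svbool_t, data: *const u64) -> (svuint64_t, svuint64_t) {
+//         let lo = svld1_vnum_u64(pg, data, 0);
+//         let hi = svld1_vnum_u64(pg, data, 1); // begins svcntd() elements after `data`
+//         (lo, hi)
+//     }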
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1row))]
+pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4f32")]
+        fn _svld1ro_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svld1ro_f32(pg.into(), base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1rod))]
+pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2f64")]
+        fn _svld1ro_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svld1ro_f64(pg.into(), base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1rob))]
+pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv16i8")]
+        fn _svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svld1ro_s8(pg, base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1roh))]
+pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv8i16")]
+        fn _svld1ro_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svld1ro_s16(pg.into(), base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1row))]
+pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv4i32")]
+        fn _svld1ro_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svld1ro_s32(pg.into(), base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1rod))]
+pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1ro.nxv2i64")]
+        fn _svld1ro_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svld1ro_s64(pg.into(), base)
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1rob))]
+pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svld1ro_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1roh))]
+pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svld1ro_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1row))]
+pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svld1ro_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 256 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ro[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(ld1rod))]
+pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svld1ro_s64(pg, base.as_signed()).as_unsigned()
+}
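+// A minimal usage sketch for the 256-bit replicating loads, assuming `quad`
+// points to four readable `f64` values (32 bytes): every 256-bit segment of
+// the result receives a copy of `quad[0..4]`. The helper name
+// `broadcast_quad_f64` is illustrative only.
+//
+//     #[target_feature(enable = "sve,f64mm")]
+//     unsafe fn broadcast_quad_f64(pg: svbool_t, quad: *const f64) -> svfloat64_t {
+//         svld1ro_f64(pg, quad)
+//     }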
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqw))]
+pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4f32")]
+        fn _svld1rq_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svld1rq_f32(pg.into(), base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqd))]
+pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2f64")]
+        fn _svld1rq_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svld1rq_f64(pg.into(), base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqb))]
+pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv16i8")]
+        fn _svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svld1rq_s8(pg, base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqh))]
+pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv8i16")]
+        fn _svld1rq_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svld1rq_s16(pg.into(), base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqw))]
+pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv4i32")]
+        fn _svld1rq_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svld1rq_s32(pg.into(), base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqd))]
+pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1rq.nxv2i64")]
+        fn _svld1rq_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svld1rq_s64(pg.into(), base)
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqb))]
+pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svld1rq_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqh))]
+pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svld1rq_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqw))]
+pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svld1rq_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load and replicate 128 bits of data"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1rq[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1rqd))]
+pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svld1rq_s64(pg, base.as_signed()).as_unsigned()
+}
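+// A minimal usage sketch for the 128-bit replicating loads, assuming `table`
+// points to 16 readable bytes: every 128-bit segment of the result receives a
+// copy of `table[0..16]`, a common way to broadcast a small lookup table or a
+// block of constants across the whole vector. The helper name
+// `broadcast_table` is illustrative only.
+//
+//     #[target_feature(enable = "sve")]
+//     unsafe fn broadcast_table(pg: svbool_t, table: *const u8) -> svuint8_t {
+//         svld1rq_u8(pg, table)
+//     }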
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8"
+        )]
+        fn _svld1sb_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast(_svld1sb_gather_s32offset_s32(pg.into(), base, offsets))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16"
+        )]
+        fn _svld1sh_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_gather_s32offset_s32(pg.into(), base, offsets))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svld1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svld1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8"
+        )]
+        fn _svld1sb_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast(_svld1sb_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
+        )]
+        fn _svld1sh_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svld1sh_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32"
+        )]
+        fn _svld1sw_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svld1sw_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svld1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svld1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svld1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8"
+        )]
+        fn _svld1sb_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast(_svld1sb_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16"
+        )]
+        fn _svld1sh_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svld1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svld1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
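+// A minimal usage sketch for the offset-gather forms, assuming every active
+// lane of `byte_offsets` addresses a readable `i8` relative to `base`: the
+// `_offset` variants take byte offsets (the `_index` variants take element
+// counts instead), and each loaded value is sign-extended to the element
+// width of the result. The helper name `gather_signed_bytes` is illustrative
+// only.
+//
+//     #[target_feature(enable = "sve")]
+//     unsafe fn gather_signed_bytes(
+//         pg: svbool_t,
+//         base: *const i8,
+//         byte_offsets: svint64_t,
+//     ) -> svint64_t {
+//         svld1sb_gather_s64offset_s64(pg, base, byte_offsets)
+//     }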
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svld1sb_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
+    simd_cast(_svld1sb_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svld1sh_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svld1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svld1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svld1sb_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast(_svld1sb_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svld1sh_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svld1sh_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svld1sw_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svld1sw_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svld1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svld1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svld1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svld1sb_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svld1sh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svld1sb_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svld1sh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1sb_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1sh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1sw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1sb_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1sh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1sw_gather_u64base_offset_u64(pg, bases, 0)
+}
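+// A minimal usage sketch for the vector-of-bases forms, assuming every active
+// lane of `addrs` holds the address of a readable `i8`: each lane is used as
+// a raw 64-bit address with no provenance, much like casting a `usize` back
+// to a pointer per lane, and the loaded bytes are sign-extended to 64 bits.
+// The helper name `gather_from_addresses` is illustrative only.
+//
+//     #[target_feature(enable = "sve")]
+//     unsafe fn gather_from_addresses(pg: svbool_t, addrs: svuint64_t) -> svint64_t {
+//         svld1sb_gather_u64base_s64(pg, addrs)
+//     }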
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")]
+        fn _svld1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast(_svld1sb_s16(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")]
+        fn _svld1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast(_svld1sb_s32(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")]
+        fn _svld1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_s32(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")]
+        fn _svld1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast(_svld1sb_s64(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")]
+        fn _svld1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast(_svld1sh_s64(pg.into(), base))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")]
+        fn _svld1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast(_svld1sw_s64(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
+    svld1sb_s16(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
+    svld1sb_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
+    svld1sh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
+    svld1sb_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
+    svld1sh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
+    svld1sw_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
+    svld1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
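+// NOTE (illustrative, non-normative): the `_vnum_` variants step `base` by
+// whole vectors: the element offset is `vnum * svcnth()` for loads that fill
+// 16-bit lanes (`svcntw()`/`svcntd()` are used below for 32-/64-bit lanes), so
+// with `svcnth() == 8` and `vnum == 2` the load starts at `base.offset(16)`.
+// A hedged usage sketch, assuming `svptrue_b16` is available (as in ACLE) and
+// `data` is an `&[i8]` long enough to cover two full vectors:
+//
+//     let pg = svptrue_b16();
+//     // Second vector's worth of i8 values, sign-extended into i16 lanes.
+//     let v: svint16_t = svld1sb_vnum_s16(pg, data.as_ptr(), 1);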
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
+    svld1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
+    svld1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
+    svld1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
+    svld1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
+    svld1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
+    svld1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
+    svld1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
+    svld1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sb_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sb))]
+pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
+    svld1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
+    svld1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
+    svld1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16"
+        )]
+        fn _svld1sh_gather_s32index_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_gather_s32index_s32(pg.into(), base, indices))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint32_t,
+) -> svuint32_t {
+    svld1sh_gather_s32index_s32(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16"
+        )]
+        fn _svld1sh_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svld1sh_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32"
+        )]
+        fn _svld1sw_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svld1sw_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svuint64_t {
+    svld1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svuint64_t {
+    svld1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16"
+        )]
+        fn _svld1sh_gather_u32index_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svld1sh_gather_u32index_s32(
+        pg.into(),
+        base,
+        indices.as_signed(),
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint32_t,
+) -> svuint32_t {
+    svld1sh_gather_u32index_s32(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svld1sh_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svld1sw_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svld1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svld1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svld1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svld1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svld1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svld1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sh))]
+pub unsafe fn svld1sh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svld1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1sw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1sw))]
+pub unsafe fn svld1sw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svld1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
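+// NOTE (illustrative, non-normative): the `[_u64base]_index_` forms above scale
+// the lane index to a byte offset by the element size, hence
+// `index.unchecked_shl(1)` (x2) for 16-bit data and `index.unchecked_shl(2)`
+// (x4) for 32-bit data; for example, `index == 3` on 16-bit data adds a byte
+// offset of 6 to every active lane of `bases`.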
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint32_t,
+) -> svint32_t {
+    svld1ub_gather_s32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svint32_t {
+    svld1uh_gather_s32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8"
+        )]
+        fn _svld1ub_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svld1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
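+// NOTE (illustrative, non-normative): unlike the `_index_` gathers, the
+// `_offset_` gathers take raw byte offsets, so reading consecutive `u8` values
+// uses offsets 0, 1, 2, ... A hedged sketch, assuming `svindex_s32` is
+// available (as in ACLE), `pg` is an all-true 32-bit predicate, and `bytes` is
+// an `&[u8]` covering every active lane:
+//
+//     let offsets = svindex_s32(0, 1); // byte offsets 0, 1, 2, ...
+//     let v: svuint32_t = svld1ub_gather_s32offset_u32(pg, bytes.as_ptr(), offsets);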
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16"
+        )]
+        fn _svld1uh_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svld1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svint64_t {
+    svld1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svint64_t {
+    svld1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svint64_t {
+    svld1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i8"
+        )]
+        fn _svld1ub_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svld1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i16"
+        )]
+        fn _svld1uh_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svld1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.nxv2i32"
+        )]
+        fn _svld1uw_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svld1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svld1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svld1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8"
+        )]
+        fn _svld1ub_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svld1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16"
+        )]
+        fn _svld1uh_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svld1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svld1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svld1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svld1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svld1ub_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svld1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svld1uh_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svld1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
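+// Editorial sketch (not generated code): how the vector-base `_offset` gathers
+// above are meant to be used. `bases` carries one raw address per lane and
+// `offset` is a plain byte offset added to every active lane, so lane `i` reads
+// the byte at `bases[i] + offset` and zero-extends it to 32 bits. The
+// `example_byte_gather` helper name is hypothetical; the predicate and address
+// vector are taken as parameters to keep the sketch self-contained.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_byte_gather(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    // Read the u8 four bytes past each active lane's address, widening to u32.
+    svld1ub_gather_u32base_offset_u32(pg, bases, 4)
+}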
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svld1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svld1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svld1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svld1ub_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svld1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svld1uh_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svld1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svld1uw_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svld1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
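+// Note (editorial): the `_u32base_` forms above keep one 32-bit address per
+// lane of an `svuint32_t`, while the `_u64base_` forms keep full 64-bit
+// addresses in an `svuint64_t`; both add the same scalar byte `offset` to
+// every active lane before the access.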
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svld1ub_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svld1uh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svld1ub_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svld1uh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1ub_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1uh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svld1uw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1ub_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1uh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svld1uw_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv8i8")]
+        fn _svld1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast::<nxv8u8, _>(_svld1ub_s16(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i8")]
+        fn _svld1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(_svld1ub_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv4i16")]
+        fn _svld1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(_svld1uh_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i8")]
+        fn _svld1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(_svld1ub_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i16")]
+        fn _svld1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(_svld1uh_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ld1.nxv2i32")]
+        fn _svld1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(_svld1uw_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
+    svld1ub_s16(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
+    svld1ub_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
+    svld1uh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
+    svld1ub_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
+    svld1uh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
+    svld1uw_s64(pg, base).as_unsigned()
+}
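+// Editorial sketch (not generated code): the contiguous forms above read
+// consecutive narrow elements from `base` and widen each one. For example,
+// `svld1ub_u32` reads one u8 per active 32-bit lane (`svcntw()` bytes when all
+// lanes are active) and zero-extends each to u32. The `example_widening_load`
+// helper name is hypothetical.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_widening_load(pg: svbool_t, bytes: *const u8) -> svuint32_t {
+    // Lane i (if active) receives `bytes[i] as u32`.
+    svld1ub_u32(pg, bytes)
+}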
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
+    svld1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
+    svld1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
+    svld1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
+    svld1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
+    svld1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
+    svld1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
+    svld1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
+    svld1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
+    svld1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1ub_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1b))]
+pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
+    svld1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
+    svld1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
+    svld1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
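+// Worked note (editorial): `vnum` counts whole vectors of the destination
+// element width, so the `_vnum` variants above step `base` by
+// `vnum * svcnt{h,w,d}()` narrow elements. For example, with a 256-bit vector
+// length `svcnth()` is 16, so `svld1ub_vnum_s16(pg, base, 2)` starts its load
+// 32 bytes past `base`.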
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint32_t,
+) -> svint32_t {
+    svld1uh_gather_s32index_u32(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16"
+        )]
+        fn _svld1uh_gather_s32index_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svld1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svint64_t {
+    svld1uh_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svint64_t {
+    svld1uw_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i16"
+        )]
+        fn _svld1uh_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svld1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.index.nxv2i32"
+        )]
+        fn _svld1uw_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svld1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint32_t,
+) -> svint32_t {
+    svld1uh_gather_u32index_u32(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16"
+        )]
+        fn _svld1uh_gather_u32index_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svld1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svld1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svld1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svld1uh_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svld1uw_gather_s64index_u64(pg, base, indices.as_signed())
+}
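+// Editorial sketch (not generated code): unlike the `_offset` gathers, the
+// `_index` gathers above scale each lane's index by the size of the loaded
+// element, so lane `i` reads from `base + indices[i] * 2` bytes for 16-bit
+// data (or `* 4` for 32-bit data) before zero-extending. The
+// `example_indexed_gather` helper name is hypothetical.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_indexed_gather(
+    pg: svbool_t,
+    halfwords: *const u16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    // Lane i (if active) receives `halfwords[indices[i]] as u64`.
+    svld1uh_gather_u64index_u64(pg, halfwords, indices)
+}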
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svld1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svld1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svld1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svld1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1h))]
+pub unsafe fn svld1uh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svld1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld1uw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld1w))]
+pub unsafe fn svld1uw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svld1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
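+// Note (editorial): the vector-base `_index` variants above are thin wrappers
+// that turn the scalar `index` into a byte offset by multiplying by the
+// element size (the `unchecked_shl(1)` / `unchecked_shl(2)` calls), so
+// `index = 3` on a 16-bit load reads 6 bytes past each lane's base address.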
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv8f32.nxv4i1"
+        )]
+        fn _svld2_f32(pg: svbool4_t, base: *const f32) -> svfloat32x2_t;
+    }
+    _svld2_f32(pg.into(), base)
+}
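+// Editorial sketch (not generated code): `svld2` reads interleaved pairs
+// (x0, y0, x1, y1, ...) starting at `base` and de-interleaves them into a
+// two-vector tuple, one pair per active lane. The `example_load_pairs` helper
+// name is hypothetical.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_load_pairs(pg: svbool_t, base: *const f32) -> svfloat32x2_t {
+    // The first vector of the tuple holds x0, x1, ..., the second y0, y1, ...
+    svld2_f32(pg, base)
+}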
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv4f64.nxv2i1"
+        )]
+        fn _svld2_f64(pg: svbool2_t, base: *const f64) -> svfloat64x2_t;
+    }
+    _svld2_f64(pg.into(), base)
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2b))]
+pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv32i8.nxv16i1"
+        )]
+        fn _svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t;
+    }
+    _svld2_s8(pg, base)
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2h))]
+pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv16i16.nxv8i1"
+        )]
+        fn _svld2_s16(pg: svbool8_t, base: *const i16) -> svint16x2_t;
+    }
+    _svld2_s16(pg.into(), base)
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv8i32.nxv4i1"
+        )]
+        fn _svld2_s32(pg: svbool4_t, base: *const i32) -> svint32x2_t;
+    }
+    _svld2_s32(pg.into(), base)
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld2.nxv4i64.nxv2i1"
+        )]
+        fn _svld2_s64(pg: svbool2_t, base: *const i64) -> svint64x2_t;
+    }
+    _svld2_s64(pg.into(), base)
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2b))]
+pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t {
+    svld2_s8(pg, base.as_signed()).as_unsigned()
+}
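+// The unsigned variants have no separate LLVM binding: they reinterpret the pointer and
+// result with `as_signed`/`as_unsigned` and reuse the signed implementation, which performs
+// an identical untyped load.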
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2h))]
+pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t {
+    svld2_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t {
+    svld2_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t {
+    svld2_s64(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x2_t {
+    svld2_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x2_t {
+    svld2_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2b))]
+pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x2_t {
+    svld2_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2h))]
+pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x2_t {
+    svld2_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x2_t {
+    svld2_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x2_t {
+    svld2_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2b))]
+pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x2_t {
+    svld2_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2h))]
+pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x2_t {
+    svld2_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2w))]
+pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x2_t {
+    svld2_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load two-element tuples into two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld2_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld2d))]
+pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x2_t {
+    svld2_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
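+// The `_vnum` wrappers above offset `base` by `vnum` times the per-vector element count
+// (`svcntb`/`svcnth`/`svcntw`/`svcntd`, matching the element width) before delegating to
+// the plain `svld2_*` forms; the resulting address is still subject to the
+// `pointer::offset` requirements documented on each intrinsic.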
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv12f32.nxv4i1"
+        )]
+        fn _svld3_f32(pg: svbool4_t, base: *const f32) -> svfloat32x3_t;
+    }
+    _svld3_f32(pg.into(), base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv6f64.nxv2i1"
+        )]
+        fn _svld3_f64(pg: svbool2_t, base: *const f64) -> svfloat64x3_t;
+    }
+    _svld3_f64(pg.into(), base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3b))]
+pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv48i8.nxv16i1"
+        )]
+        fn _svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t;
+    }
+    _svld3_s8(pg, base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3h))]
+pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv24i16.nxv8i1"
+        )]
+        fn _svld3_s16(pg: svbool8_t, base: *const i16) -> svint16x3_t;
+    }
+    _svld3_s16(pg.into(), base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv12i32.nxv4i1"
+        )]
+        fn _svld3_s32(pg: svbool4_t, base: *const i32) -> svint32x3_t;
+    }
+    _svld3_s32(pg.into(), base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld3.nxv6i64.nxv2i1"
+        )]
+        fn _svld3_s64(pg: svbool2_t, base: *const i64) -> svint64x3_t;
+    }
+    _svld3_s64(pg.into(), base)
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3b))]
+pub unsafe fn svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t {
+    svld3_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3h))]
+pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t {
+    svld3_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t {
+    svld3_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t {
+    svld3_s64(pg, base.as_signed()).as_unsigned()
+}
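+// Illustrative sketch (not part of the generated bindings): `svld3_u8` suits de-interleaving
+// packed three-channel data such as RGB bytes. `svptrue_b8` and `svget3_u8` are assumed
+// companion intrinsics (exact names/signatures may differ), and `pixels` is assumed to be a
+// `&[u8]` of interleaved R, G, B values long enough for every active lane.
+//
+//     let pg = svptrue_b8();
+//     let rgb: svuint8x3_t = svld3_u8(pg, pixels.as_ptr());
+//     let r = svget3_u8::<0>(rgb);
+//     let g = svget3_u8::<1>(rgb);
+//     let b = svget3_u8::<2>(rgb);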
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x3_t {
+    svld3_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x3_t {
+    svld3_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3b))]
+pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x3_t {
+    svld3_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3h))]
+pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x3_t {
+    svld3_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x3_t {
+    svld3_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x3_t {
+    svld3_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3b))]
+pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x3_t {
+    svld3_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3h))]
+pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x3_t {
+    svld3_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3w))]
+pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x3_t {
+    svld3_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load three-element tuples into three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld3_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld3d))]
+pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x3_t {
+    svld3_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
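+// As with the two-element `_vnum` wrappers, each variant above scales `vnum` by the count
+// intrinsic matching its element width (`svcntb` for bytes through `svcntd` for
+// doublewords) before delegating to the corresponding `svld3_*` load.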
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv16f32.nxv4i1"
+        )]
+        fn _svld4_f32(pg: svbool4_t, base: *const f32) -> svfloat32x4_t;
+    }
+    _svld4_f32(pg.into(), base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv8f64.nxv2i1"
+        )]
+        fn _svld4_f64(pg: svbool2_t, base: *const f64) -> svfloat64x4_t;
+    }
+    _svld4_f64(pg.into(), base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4b))]
+pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv64i8.nxv16i1"
+        )]
+        fn _svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t;
+    }
+    _svld4_s8(pg, base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4h))]
+pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv32i16.nxv8i1"
+        )]
+        fn _svld4_s16(pg: svbool8_t, base: *const i16) -> svint16x4_t;
+    }
+    _svld4_s16(pg.into(), base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv16i32.nxv4i1"
+        )]
+        fn _svld4_s32(pg: svbool4_t, base: *const i32) -> svint32x4_t;
+    }
+    _svld4_s32(pg.into(), base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ld4.nxv8i64.nxv2i1"
+        )]
+        fn _svld4_s64(pg: svbool2_t, base: *const i64) -> svint64x4_t;
+    }
+    _svld4_s64(pg.into(), base)
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4b))]
+pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t {
+    svld4_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4h))]
+pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t {
+    svld4_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t {
+    svld4_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t {
+    svld4_s64(pg, base.as_signed()).as_unsigned()
+}
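+// Illustrative sketch (not part of the generated bindings): `svld4_u8` de-interleaves
+// four-channel data such as RGBA bytes into an `svuint8x4_t`. `svptrue_b8` and `svget4_u8`
+// are assumed companion intrinsics (exact names/signatures may differ).
+//
+//     let pg = svptrue_b8();
+//     let rgba: svuint8x4_t = svld4_u8(pg, pixels.as_ptr());
+//     let alpha = svget4_u8::<3>(rgba);            // extract the fourth channel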
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32x4_t {
+    svld4_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64x4_t {
+    svld4_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4b))]
+pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x4_t {
+    svld4_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4h))]
+pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16x4_t {
+    svld4_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32x4_t {
+    svld4_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64x4_t {
+    svld4_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4b))]
+pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8x4_t {
+    svld4_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4h))]
+pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16x4_t {
+    svld4_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4w))]
+pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32x4_t {
+    svld4_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load four-element tuples into four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svld4_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ld4d))]
+pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64x4_t {
+    svld4_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
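+// Illustrative sketch (not part of the generated bindings): the `_vnum` forms let a loop
+// walk a buffer in whole-vector steps. With the offset arithmetic used by these wrappers,
+// one `svld4_vnum_f32` call covers four vectors of elements, so `vnum` advances by 4 here.
+// `svptrue_b32` is an assumed helper, and `len` (the number of `f32`s at `data`) is assumed
+// to be a multiple of four times the vector length.
+//
+//     let pg = svptrue_b32();
+//     let per_vec = svcntw() as i64;               // 32-bit elements per vector
+//     let mut vnum = 0;
+//     while vnum * per_vec < len {
+//         let quad = svld4_vnum_f32(pg, data.as_ptr(), vnum);
+//         // ... process `quad` ...
+//         vnum += 4;
+//     }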
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4f32")]
+        fn _svldff1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svldff1_f32(pg.into(), base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2f64")]
+        fn _svldff1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svldff1_f64(pg.into(), base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv16i8")]
+        fn _svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svldff1_s8(pg, base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i16")]
+        fn _svldff1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svldff1_s16(pg.into(), base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i32")]
+        fn _svldff1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svldff1_s32(pg.into(), base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i64")]
+        fn _svldff1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svldff1_s64(pg.into(), base)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svldff1_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svldff1_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svldff1_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svldff1_s64(pg, base.as_signed()).as_unsigned()
+}
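+// Illustrative sketch (not part of the generated bindings): first-faulting loads are
+// normally paired with the FFR intrinsics to find out how many lanes were actually loaded.
+// `svsetffr`, `svrdffr` and `svptrue_b32` are assumed companion intrinsics (exact names and
+// signatures may differ).
+//
+//     svsetffr();                                  // mark every FFR lane as valid
+//     let pg = svptrue_b32();
+//     let data = svldff1_f32(pg, ptr);             // stops early instead of faulting
+//     let loaded = svrdffr();                      // predicate of lanes that were loaded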
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32index_f32(
+    pg: svbool_t,
+    base: *const f32,
+    indices: svint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4f32"
+        )]
+        fn _svldff1_gather_s32index_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            indices: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_s32index_f32(pg.into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svldff1_gather_s32index_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            indices: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_s32index_s32(pg.into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint32_t,
+) -> svuint32_t {
+    svldff1_gather_s32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
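+// Hedged sketch of the signed 32-bit index-form gather above (comment only): indices
+// are element indices, scaled by the element size (4 bytes for `f32`) during address
+// calculation, unlike the `offset` forms later in this file, which take raw byte
+// offsets. `svsetffr`, `svptrue_b32` and `svindex_s32` are assumed companion
+// intrinsics; `table: &[f32]` is an illustrative buffer.
+//
+//     unsafe {
+//         svsetffr();
+//         let pg = svptrue_b32();
+//         let idx = svindex_s32(0, 2);  // lane indices 0, 2, 4, ...
+//         let every_other = svldff1_gather_s32index_f32(pg, table.as_ptr(), idx);
+//     }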
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2f64"
+        )]
+        fn _svldff1_gather_s64index_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            indices: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldff1_gather_s64index_f64(pg.into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i64"
+        )]
+        fn _svldff1_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            indices: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldff1_gather_s64index_s64(pg.into(), base, indices)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32index_f32(
+    pg: svbool_t,
+    base: *const f32,
+    indices: svuint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4f32"
+        )]
+        fn _svldff1_gather_u32index_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            indices: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_u32index_f32(pg.into(), base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svldff1_gather_u32index_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            indices: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_u32index_s32(pg.into(), base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint32_t,
+) -> svuint32_t {
+    svldff1_gather_u32index_s32(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svuint64_t,
+) -> svfloat64_t {
+    svldff1_gather_s64index_f64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
+}
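+// For reference, a scalar-equivalent sketch (comment only) of what the 64-bit
+// index-form gathers above compute per active lane, ignoring first-faulting behaviour.
+// The function and its parameters are illustrative names, not crate items:
+//
+//     fn scalar_index_gather(base: *const f64, indices: &[i64], active: &[bool]) -> Vec<f64> {
+//         let mut out = vec![0.0; indices.len()];
+//         for (i, (&ix, &on)) in indices.iter().zip(active).enumerate() {
+//             if on {
+//                 // address = base + ix * size_of::<f64>(), exactly like the index forms
+//                 out[i] = unsafe { *base.offset(ix as isize) };
+//             }
+//         }
+//         out
+//     }
+//
+// The unsigned-index wrappers simply reinterpret their indices and reuse the signed
+// forms, since 64-bit indices need no separate sign or zero extension step.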
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4f32"
+        )]
+        fn _svldff1_gather_s32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_s32offset_f32(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i32"
+        )]
+        fn _svldff1_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_s32offset_s32(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svldff1_gather_s32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
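+// Hedged sketch contrasting the offset form with the index form shown earlier (comment
+// only): offsets are raw byte offsets, so stepping through every other `f32` needs a
+// stride of 8 bytes rather than 2 elements. `svsetffr`, `svptrue_b32` and `svindex_s32`
+// are assumed companion intrinsics; `table: &[f32]` is illustrative.
+//
+//     unsafe {
+//         svsetffr();
+//         let pg = svptrue_b32();
+//         let off = svindex_s32(0, 8);  // byte offsets 0, 8, 16, ...
+//         let every_other = svldff1_gather_s32offset_f32(pg, table.as_ptr(), off);
+//     }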
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2f64"
+        )]
+        fn _svldff1_gather_s64offset_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            offsets: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldff1_gather_s64offset_f64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i64"
+        )]
+        fn _svldff1_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            offsets: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldff1_gather_s64offset_s64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svuint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4f32"
+        )]
+        fn _svldff1_gather_u32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_u32offset_f32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i32"
+        )]
+        fn _svldff1_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_u32offset_s32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldff1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svuint64_t,
+) -> svfloat64_t {
+    svldff1_gather_s64offset_f64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
+    svldff1_gather_u32base_offset_f32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
+    svldff1_gather_u64base_offset_f64(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1_gather_u64base_offset_u64(pg, bases, 0)
+}
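+// Hedged sketch of the vector-of-bases form above (comment only): every lane carries
+// its own absolute address, which is why the safety notes flag the loss of pointer
+// provenance. `svsetffr`, `svptrue_b64` and `svdup_n_u64` are assumed companion
+// intrinsics; `table: &[f64]` is illustrative. Splatting one exposed address into all
+// lanes loads the same element everywhere:
+//
+//     unsafe {
+//         svsetffr();
+//         let pg = svptrue_b64();
+//         let bases = svdup_n_u64(table.as_ptr() as usize as u64); // exposed address
+//         let splat = svldff1_gather_u64base_f64(pg, bases);
+//     }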
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_index_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svfloat32_t {
+    svldff1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldff1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldff1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_index_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svfloat64_t {
+    svldff1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldff1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldff1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
+}
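+// The base+index wrappers above only scale the scalar index into a byte offset
+// (`index << 3` for 64-bit elements, `index << 2` for 32-bit) before delegating to the
+// base+offset forms below, so these two calls are equivalent (comment-only sketch;
+// `pg` and `bases` are assumed to be valid values from the caller):
+//
+//     let a = unsafe { svldff1_gather_u64base_index_f64(pg, bases, 3) };
+//     let b = unsafe { svldff1_gather_u64base_offset_f64(pg, bases, 3 << 3) };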
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_offset_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4f32.nxv4i32"
+        )]
+        fn _svldff1_gather_u32base_offset_f32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svfloat32_t;
+    }
+    _svldff1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i32.nxv4i32"
+        )]
+        fn _svldff1_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svint32_t;
+    }
+    _svldff1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svldff1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_offset_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2f64.nxv2i64"
+        )]
+        fn _svldff1_gather_u64base_offset_f64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svfloat64_t;
+    }
+    _svldff1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i64.nxv2i64"
+        )]
+        fn _svldff1_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svint64_t;
+    }
+    _svldff1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldff1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
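+// Scalar-equivalent sketch (comment only) of the base+offset gathers just above,
+// ignoring first-faulting behaviour: each active lane reads from `bases[i] + offset`
+// bytes, with no pointer provenance attached. The function and its parameters are
+// illustrative names, not crate items:
+//
+//     fn scalar_base_offset_gather(bases: &[u64], offset: i64, active: &[bool]) -> Vec<f64> {
+//         bases
+//             .iter()
+//             .zip(active)
+//             .map(|(&b, &on)| {
+//                 let addr = b.wrapping_add(offset as u64);
+//                 if on { unsafe { core::ptr::read(addr as usize as *const f64) } } else { 0.0 }
+//             })
+//             .collect()
+//     }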
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
+    svldff1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
+    svldff1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
+    svldff1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
+    svldff1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
+    svldff1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
+    svldff1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
+    svldff1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
+    svldff1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
+    svldff1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1d))]
+pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
+    svldff1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
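+// Illustrative sketch, not part of the generated bindings: `vnum` counts
+// whole vectors, so the `_vnum_` forms read from `base + vnum * svcnt*()`
+// elements, exactly as the wrappers above compute.
+#[target_feature(enable = "sve")]
+unsafe fn ldff1_second_vector_of_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    // Reads the vector starting `svcntw()` elements past `base`, i.e. the
+    // same addresses as `svldff1_f32(pg, base.offset(svcntw() as isize))`.
+    svldff1_vnum_f32(pg, base, 1)
+}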
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8"
+        )]
+        fn _svldff1sb_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
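+    // The LLVM intrinsic yields narrow `nxv4i8` lanes; `simd_cast`
+    // sign-extends them to the 32-bit lanes of the returned `svint32_t`.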
+    simd_cast(_svldff1sb_gather_s32offset_s32(pg.into(), base, offsets))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16"
+        )]
+        fn _svldff1sh_gather_s32offset_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_gather_s32offset_s32(pg.into(), base, offsets))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svldff1sb_gather_s32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint32_t,
+) -> svuint32_t {
+    svldff1sh_gather_s32offset_s32(pg, base, offsets).as_unsigned()
+}
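+// Illustrative sketch, not part of the generated bindings: the `[s32]offset`
+// forms treat `offsets` as per-lane *byte* offsets from `base` and
+// sign-extend the narrow loads. `svindex_s32` is assumed to be the usual
+// ACLE index intrinsic (lane i = base + i * step); substitute whichever
+// helper is actually available.
+#[target_feature(enable = "sve")]
+unsafe fn gather_consecutive_i8_as_i32(pg: svbool_t, base: *const i8) -> svint32_t {
+    // Byte offsets 0, 1, 2, ... pick out consecutive i8 elements, which the
+    // load then sign-extends to i32 lanes.
+    let offsets = svindex_s32(0, 1);
+    svldff1sb_gather_s32offset_s32(pg, base, offsets)
+}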
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8"
+        )]
+        fn _svldff1sb_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast(_svldff1sb_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16"
+        )]
+        fn _svldff1sh_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldff1sh_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32"
+        )]
+        fn _svldff1sw_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldff1sw_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldff1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldff1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldff1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8"
+        )]
+        fn _svldff1sb_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast(_svldff1sb_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16"
+        )]
+        fn _svldff1sh_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldff1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldff1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svldff1sb_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
+    simd_cast(_svldff1sb_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svldff1sh_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svldff1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svldff1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svldff1sb_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast(_svldff1sb_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svldff1sh_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldff1sh_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svldff1sw_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldff1sw_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldff1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldff1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldff1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
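+// Illustrative sketch, not part of the generated bindings: a typical use of
+// the sign-extending `[_u64base]_offset` forms is loading one narrow field
+// from a vector of record addresses, here a hypothetical `i16` field at byte
+// offset 4 of each pointed-to record.
+#[target_feature(enable = "sve")]
+unsafe fn gather_i16_field_sext(pg: svbool_t, record_addrs: svuint64_t) -> svint64_t {
+    // Each active lane loads an i16 from `record_addrs[lane] + 4` and
+    // sign-extends it to 64 bits.
+    svldff1sh_gather_u64base_offset_s64(pg, record_addrs, 4)
+}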
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1sb_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1sh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1sb_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1sh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1sb_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1sh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1sw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sb_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1sw_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
+        fn _svldff1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast(_svldff1sb_s16(pg.into(), base))
+}
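
A hedged usage sketch for the contiguous form (editorial note, not part of the patch). `svptrue_b16()` is assumed to be available from elsewhere in this patch (it is not shown in this hunk), and `data` is assumed to point at a buffer of at least `svcnth()` readable i8 elements; shorter buffers fall back to the first-faulting behaviour documented above.

    // Sketch only: read svcnth() i8 elements starting at `data` and sign-extend each
    // one into a 16-bit result lane.
    #[target_feature(enable = "sve")]
    unsafe fn load_bytes_widened(data: *const i8) -> svint16_t {
        // An all-true predicate makes every 16-bit lane active.
        svldff1sb_s16(svptrue_b16(), data)
    }
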
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
+        fn _svldff1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast(_svldff1sb_s32(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
+        fn _svldff1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_s32(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
+        fn _svldff1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast(_svldff1sb_s64(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")]
+        fn _svldff1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast(_svldff1sh_s64(pg.into(), base))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")]
+        fn _svldff1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast(_svldff1sw_s64(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
+    svldff1sb_s16(pg, base).as_unsigned()
+}
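
A small sketch of the unsigned-container form (editorial note, not part of the patch; assumes the same SVE setup as above). Despite the unsigned return type, the load still sign-extends each byte, so the bit patterns match the `_s16` form lane for lane.

    // Sketch only: a source byte of 0xFF is sign-extended, producing a lane value of
    // 0xFFFF (65535) in the unsigned result vector.
    #[target_feature(enable = "sve")]
    unsafe fn load_bytes_as_u16(pg: svbool_t, data: *const i8) -> svuint16_t {
        svldff1sb_u16(pg, data)
    }
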
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
+    svldff1sb_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
+    svldff1sh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
+    svldff1sb_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
+    svldff1sh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
+    svldff1sw_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
+    svldff1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
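
A hedged sketch of the `vnum` addressing (editorial note, not part of the patch; assumes the same SVE setup as above). `vnum` steps through memory in blocks of `svcnth()` i8 source elements, a block size that is only known at run time.

    // Sketch only: vnum = 1 selects the block of svcnth() bytes immediately after the
    // block starting at `base`.
    #[target_feature(enable = "sve")]
    unsafe fn load_second_block(pg: svbool_t, base: *const i8) -> svint16_t {
        svldff1sb_vnum_s16(pg, base, 1)
    }
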
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
+    svldff1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
+    svldff1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
+    svldff1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
+    svldff1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
+    svldff1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
+    svldff1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
+    svldff1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
+    svldff1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sb_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sb))]
+pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
+    svldff1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
+    svldff1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
+    svldff1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16"
+        )]
+        fn _svldff1sh_gather_s32index_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_gather_s32index_s32(pg.into(), base, indices))
+}
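
A hedged sketch of the signed-index gather (editorial note, not part of the patch; assumes the same SVE setup as above). The `indices` are element indices rather than byte offsets: each active lane's 32-bit index is sign-extended to 64 bits, scaled by the 2-byte element size and added to `base`.

    // Sketch only: lane n reads the i16 at base.offset(idx[n] as isize) and
    // sign-extends it to i32.
    #[target_feature(enable = "sve")]
    unsafe fn gather_i16_as_i32(pg: svbool_t, base: *const i16, idx: svint32_t) -> svint32_t {
        svldff1sh_gather_s32index_s32(pg, base, idx)
    }
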
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint32_t,
+) -> svuint32_t {
+    svldff1sh_gather_s32index_s32(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
+        )]
+        fn _svldff1sh_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldff1sh_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
+        )]
+        fn _svldff1sw_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldff1sw_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldff1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldff1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16"
+        )]
+        fn _svldff1sh_gather_u32index_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldff1sh_gather_u32index_s32(
+        pg.into(),
+        base,
+        indices.as_signed(),
+    ))
+}
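
A hedged sketch contrasting the unsigned-index form with the signed one above (editorial note, not part of the patch; same assumptions as before). Here each 32-bit index is zero-extended to 64 bits (the `uxtw` addressing mode) before being scaled and added to `base`.

    // Sketch only: identical to the signed-index gather except for how the 32-bit
    // indices are widened to 64 bits.
    #[target_feature(enable = "sve")]
    unsafe fn gather_with_unsigned_indices(
        pg: svbool_t,
        base: *const i16,
        idx: svuint32_t,
    ) -> svint32_t {
        svldff1sh_gather_u32index_s32(pg, base, idx)
    }
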
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint32_t,
+) -> svuint32_t {
+    svldff1sh_gather_u32index_s32(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1sh_gather_s64index_s64(pg, base, indices.as_signed())
+}
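
A brief sketch for the 64-bit-index forms (editorial note, not part of the patch; same assumptions as before). With 64-bit indices there is no widening step, so the unsigned-index intrinsic simply reuses the signed-index implementation.

    // Sketch only: each active lane reads the i16 at base plus idx[n] elements,
    // using the full 64-bit index value.
    #[target_feature(enable = "sve")]
    unsafe fn gather_with_u64_indices(
        pg: svbool_t,
        base: *const i16,
        idx: svuint64_t,
    ) -> svint64_t {
        svldff1sh_gather_u64index_s64(pg, base, idx)
    }
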
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1sw_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldff1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
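
A hedged sketch of the base-plus-index form (editorial note, not part of the patch; same assumptions as before, plus an index small enough that the scaled byte offset cannot overflow, which the `unchecked_shl` above also requires).

    // Sketch only: the index is scaled by the 2-byte element size, so index 3 reads
    // the i16 located 6 bytes past each active lane's base address.
    #[target_feature(enable = "sve")]
    unsafe fn gather_fourth_halfword(pg: svbool_t, bases: svuint32_t) -> svint32_t {
        svldff1sh_gather_u32base_index_s32(pg, bases, 3)
    }
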
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldff1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldff1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
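+    // 32-bit elements, so the element index becomes a byte offset via << 2 (x4).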
+    svldff1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sh))]
+pub unsafe fn svldff1sh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldff1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1sw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1sw))]
+pub unsafe fn svldff1sw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldff1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint32_t,
+) -> svint32_t {
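+    // Zero-extended 8-bit data has the same bit pattern whether the 32-bit
+    // lanes are later read as signed or unsigned, so the unsigned variant is
+    // reused and its result merely reinterpreted.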
+    svldff1ub_gather_s32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svint32_t {
+    svldff1uh_gather_s32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i8"
+        )]
+        fn _svldff1ub_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
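+    // The raw LLVM gather returns 8-bit lanes (nxv4i8); reinterpreting them as
+    // unsigned and widening with `simd_cast` zero-extends each lane to 32 bits.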
+    simd_cast::<nxv4u8, _>(
+        _svldff1ub_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16"
+        )]
+        fn _svldff1uh_gather_s32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldff1uh_gather_s32offset_u32(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldff1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldff1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldff1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i8"
+        )]
+        fn _svldff1ub_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svldff1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i16"
+        )]
+        fn _svldff1uh_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldff1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.nxv2i32"
+        )]
+        fn _svldff1uw_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldff1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldff1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldff1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i8"
+        )]
+        fn _svldff1ub_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svldff1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16"
+        )]
+        fn _svldff1uh_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldff1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svuint64_t {
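+    // Unsigned and signed 64-bit offsets address the same bytes under wrapping
+    // pointer arithmetic, so the `s64offset` implementation is reused with the
+    // offset lanes reinterpreted.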
+    svldff1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldff1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svldff1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svldff1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svldff1ub_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
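+    // Each lane of `bases` already holds a full address; the single scalar
+    // `offset` is added, in bytes, to every active lane before the 8-bit
+    // loads, whose results are then zero-extended to 32-bit lanes below.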
+    simd_cast::<nxv4u8, _>(
+        _svldff1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svldff1uh_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldff1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldff1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldff1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldff1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svldff1ub_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svldff1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svldff1uh_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldff1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svldff1uw_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldff1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1ub_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldff1uh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1ub_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldff1uh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1ub_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1uh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldff1uw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1ub_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1uh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldff1uw_gather_u64base_offset_u64(pg, bases, 0)
+}
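// --- Illustrative sketch (not part of the generated bindings) ---
// The `u64base` gathers treat each lane of `bases` as a raw address, which is
// why the docs above compare them to `core::ptr::from_exposed_addr`: provenance
// has to be re-established per lane. A minimal way to build such a vector is to
// splat one address into every lane, so each active lane reads the same u32.
// Assumes `svdup_n_u64` is exposed elsewhere in this module under its ACLE name.
#[target_feature(enable = "sve")]
unsafe fn splat_gather_u32(pg: svbool_t, word: *const u32) -> svuint64_t {
    let bases = svdup_n_u64(word as usize as u64); // one address replicated across lanes
    svldff1uw_gather_u64base_u64(pg, bases)
}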
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv8i8")]
+        fn _svldff1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast::<nxv8u8, _>(_svldff1ub_s16(pg.into(), base.as_signed()).as_unsigned())
+}
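// --- Illustrative sketch (not part of the generated bindings) ---
// The first-faulting (`ldff1`) loads are meant to be paired with the FFR
// intrinsics: after resetting the FFR, the predicate returned by `svrdffr`
// reports which lanes were actually loaded before a fault stopped the access.
// Assumes `svsetffr`, `svrdffr` and `svptrue_b16` are exposed elsewhere in this
// module under their ACLE names.
#[target_feature(enable = "sve")]
unsafe fn load_prefix_widening(data: *const u8) -> (svint16_t, svbool_t) {
    svsetffr();                           // reset the first-fault register
    let pg = svptrue_b16();               // all-true predicate for 16-bit lanes
    let loaded = svldff1ub_s16(pg, data); // zero-extending, first-faulting load
    (loaded, svrdffr())                   // lanes that did not fault
}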
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i8")]
+        fn _svldff1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(_svldff1ub_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv4i16")]
+        fn _svldff1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(_svldff1uh_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i8")]
+        fn _svldff1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(_svldff1ub_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i16")]
+        fn _svldff1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(_svldff1uh_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldff1.nxv2i32")]
+        fn _svldff1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(_svldff1uw_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
+    svldff1ub_s16(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
+    svldff1ub_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
+    svldff1uh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
+    svldff1ub_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
+    svldff1uh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
+    svldff1uw_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
+    svldff1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
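// --- Illustrative sketch (not part of the generated bindings) ---
// `vnum` advances `base` by whole scalable vectors rather than bytes: for this
// variant the element count is `svcnth()`, so `vnum = 1` begins exactly where
// `vnum = 0` ended. The two calls below are therefore equivalent to loading at
// `data` and at `data.add(svcnth() as usize)`.
#[target_feature(enable = "sve")]
unsafe fn load_two_rows(pg: svbool_t, data: *const u8) -> (svint16_t, svint16_t) {
    let first = svldff1ub_vnum_s16(pg, data, 0);
    let second = svldff1ub_vnum_s16(pg, data, 1);
    (first, second)
}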
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
+    svldff1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
+    svldff1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
+    svldff1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
+    svldff1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
+    svldff1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
+    svldff1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
+    svldff1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
+    svldff1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1ub_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1b))]
+pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
+    svldff1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
+    svldff1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
+    svldff1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32index_s32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint32_t,
+) -> svint32_t {
+    svldff1uh_gather_s32index_u32(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s32index_u32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16"
+        )]
+        fn _svldff1uh_gather_s32index_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldff1uh_gather_s32index_u32(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
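// --- Illustrative sketch (not part of the generated bindings) ---
// The `index` gathers scale each per-lane index by the element size (two bytes
// here), so `indices` holds element positions, not byte offsets. Lane i of the
// sketch below reads `table[2 * i]`. Assumes `svindex_s32` is exposed elsewhere
// in this module under its ACLE name.
#[target_feature(enable = "sve")]
unsafe fn gather_every_other_u16(pg: svbool_t, table: *const u16) -> svuint32_t {
    let indices = svindex_s32(0, 2); // 0, 2, 4, ... one index per 32-bit lane
    svldff1uh_gather_s32index_u32(pg, table, indices)
}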
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svint64_t {
+    svldff1uh_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svint64_t {
+    svldff1uw_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i16"
+        )]
+        fn _svldff1uh_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldff1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.index.nxv2i32"
+        )]
+        fn _svldff1uw_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldff1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32index_s32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint32_t,
+) -> svint32_t {
+    svldff1uh_gather_u32index_u32(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u32]index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32index_u32(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16"
+        )]
+        fn _svldff1uh_gather_u32index_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            indices: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldff1uh_gather_u32index_u32(pg.into(), base.as_signed(), indices.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1uh_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldff1uw_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldff1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldff1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldff1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldff1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1h))]
+pub unsafe fn svldff1uh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldff1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend, first-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldff1uw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldff1w))]
+pub unsafe fn svldff1uw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldff1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
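// --- Illustrative sketch (not part of the generated bindings) ---
// The `_index` wrappers above convert an element index into a byte offset by
// shifting by the element size (1 for 16-bit data, 2 for 32-bit data) and then
// defer to the corresponding `_offset` intrinsic. For an index that does not
// overflow when scaled, the two calls below should gather the same elements.
#[target_feature(enable = "sve")]
unsafe fn index_vs_offset(pg: svbool_t, bases: svuint64_t, index: i64) -> (svuint64_t, svuint64_t) {
    let via_index = svldff1uw_gather_u64base_index_u64(pg, bases, index);
    let via_offset = svldff1uw_gather_u64base_offset_u64(pg, bases, index * 4);
    (via_index, via_offset)
}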
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4f32")]
+        fn _svldnf1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svldnf1_f32(pg.into(), base)
+}
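// --- Illustrative sketch (not part of the generated bindings) ---
// The non-faulting (`ldnf1`) loads differ from the first-faulting forms in that
// they never take a fault at all: lanes whose access would fault simply clear
// their FFR bits and yield unpredictable data. Assumes `svsetffr` and `svrdffr`
// are exposed elsewhere in this module under their ACLE names.
#[target_feature(enable = "sve")]
unsafe fn try_load_f32(pg: svbool_t, data: *const f32) -> (svfloat32_t, svbool_t) {
    svsetffr();                          // reset the first-fault register
    let values = svldnf1_f32(pg, data);  // never faults, even on unmapped addresses
    (values, svrdffr())                  // lanes that actually read memory
}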
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2f64")]
+        fn _svldnf1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svldnf1_f64(pg.into(), base)
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv16i8")]
+        fn _svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svldnf1_s8(pg, base)
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i16")]
+        fn _svldnf1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svldnf1_s16(pg.into(), base)
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i32")]
+        fn _svldnf1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svldnf1_s32(pg.into(), base)
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i64")]
+        fn _svldnf1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svldnf1_s64(pg.into(), base)
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svldnf1_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svldnf1_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svldnf1_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svldnf1_s64(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
+    svldnf1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
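+// Note on the `_vnum_` variants: `vnum` counts whole vectors, so the address is
+// advanced by `vnum * svcntw()` f32 elements (one vector length per step), a
+// quantity only known at run time. For example, `svldnf1_vnum_f32(pg, ptr, 1)`
+// reads the vector-sized block immediately following the one at `ptr`.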
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
+    svldnf1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
+    svldnf1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
+    svldnf1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
+    svldnf1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
+    svldnf1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
+    svldnf1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
+    svldnf1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
+    svldnf1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1d))]
+pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
+    svldnf1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t {
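+    // The LLVM intrinsic loads one byte per 16-bit result lane (`nxv8i8`);
+    // `simd_cast` then sign-extends each lane to i16.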
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")]
+        fn _svldnf1sb_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast(_svldnf1sb_s16(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")]
+        fn _svldnf1sb_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast(_svldnf1sb_s32(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")]
+        fn _svldnf1sh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast(_svldnf1sh_s32(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")]
+        fn _svldnf1sb_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast(_svldnf1sb_s64(pg.into(), base))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")]
+        fn _svldnf1sh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast(_svldnf1sh_s64(pg.into(), base))
+}
+#[doc = "Load 32-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sw))]
+pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")]
+        fn _svldnf1sw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast(_svldnf1sw_s64(pg.into(), base))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t {
+    svldnf1sb_s16(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t {
+    svldnf1sb_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t {
+    svldnf1sh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t {
+    svldnf1sb_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t {
+    svldnf1sh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sw))]
+pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t {
+    svldnf1sw_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svint16_t {
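+    // Each step of `vnum` covers one full vector of 16-bit lanes: svcnth() lanes,
+    // each loaded from a single byte, so the byte offset is `svcnth() * vnum`.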
+    svldnf1sb_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svint32_t {
+    svldnf1sb_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svint32_t {
+    svldnf1sh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svint64_t {
+    svldnf1sb_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svint64_t {
+    svldnf1sh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sw))]
+pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svint64_t {
+    svldnf1sw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svuint16_t {
+    svldnf1sb_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svuint32_t {
+    svldnf1sb_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svuint32_t {
+    svldnf1sh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sb_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sb))]
+pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svuint64_t {
+    svldnf1sb_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sh))]
+pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svuint64_t {
+    svldnf1sh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and sign-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1sw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1sw))]
+pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svuint64_t {
+    svldnf1sw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t {
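+    // There is no unsigned-element LLVM intrinsic, so the bytes are loaded as
+    // `nxv8i8`, reinterpreted as unsigned with `as_unsigned`, and `simd_cast`
+    // then zero-extends each u8 lane into the 16-bit result.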
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv8i8")]
+        fn _svldnf1ub_s16(pg: svbool8_t, base: *const i8) -> nxv8i8;
+    }
+    simd_cast::<nxv8u8, _>(_svldnf1ub_s16(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i8")]
+        fn _svldnf1ub_s32(pg: svbool4_t, base: *const i8) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(_svldnf1ub_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv4i16")]
+        fn _svldnf1uh_s32(pg: svbool4_t, base: *const i16) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(_svldnf1uh_s32(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i8")]
+        fn _svldnf1ub_s64(pg: svbool2_t, base: *const i8) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(_svldnf1ub_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i16")]
+        fn _svldnf1uh_s64(pg: svbool2_t, base: *const i16) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(_svldnf1uh_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 32-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnf1.nxv2i32")]
+        fn _svldnf1uw_s64(pg: svbool2_t, base: *const i32) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(_svldnf1uw_s64(pg.into(), base.as_signed()).as_unsigned())
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t {
+    svldnf1ub_s16(pg, base).as_unsigned()
+}
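+// The `_uNN` forms, such as `svldnf1ub_u16` above, are thin wrappers:
+// zero-extension is sign-agnostic, so they call the corresponding `_sNN`
+// implementation (which already performs the zero-extension) and reinterpret
+// the result lanes with `as_unsigned()`.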
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t {
+    svldnf1ub_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t {
+    svldnf1uh_s32(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t {
+    svldnf1ub_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t {
+    svldnf1uh_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 32-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t {
+    svldnf1uw_s64(pg, base).as_unsigned()
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svint16_t {
+    svldnf1ub_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
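+// Hedged note on the `_vnum_` forms: `vnum` counts whole vectors, so the base
+// pointer is advanced by `svcnth() * vnum` source elements here (one byte per
+// 16-bit destination lane), and the offset scales with the runtime vector
+// length. Consecutive `vnum` values therefore address consecutive,
+// non-overlapping blocks, e.g. (illustrative only):
+//
+//     // block `i` of a byte buffer, zero-extended into 16-bit lanes
+//     let block = svldnf1ub_vnum_s16(pg, buf, i);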
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svint32_t {
+    svldnf1ub_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svint32_t {
+    svldnf1uh_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svint64_t {
+    svldnf1ub_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svint64_t {
+    svldnf1uh_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svint64_t {
+    svldnf1uw_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svuint16_t {
+    svldnf1ub_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svuint32_t {
+    svldnf1ub_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svuint32_t {
+    svldnf1uh_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Load 8-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1ub_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1b))]
+pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svuint64_t {
+    svldnf1ub_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 16-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uh_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1h))]
+pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svuint64_t {
+    svldnf1uh_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Load 32-bit data and zero-extend, non-faulting"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnf1uw_vnum_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."]
+#[doc = "  * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnf1w))]
+pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svuint64_t {
+    svldnf1uw_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4f32")]
+        fn _svldnt1_f32(pg: svbool4_t, base: *const f32) -> svfloat32_t;
+    }
+    _svldnt1_f32(pg.into(), base)
+}
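+// Hedged note on the non-temporal forms: they hint that the data is unlikely
+// to be reused soon, so implementations may avoid polluting the caches with
+// it. The values returned are the same as for a plain predicated load; only
+// the caching hint and the ordering guarantees differ (see the barrier
+// guidance linked above). Illustrative streaming copy, assuming
+// `svwhilelt_b32_s64` and `svstnt1_f32` are generated elsewhere in this module
+// under their ACLE names (to be run in an unsafe, SVE-enabled context):
+//
+//     let mut i = 0i64;
+//     while i < n {
+//         let pg = svwhilelt_b32_s64(i, n);
+//         svstnt1_f32(pg, dst.offset(i as isize), svldnt1_f32(pg, src.offset(i as isize)));
+//         i += svcntw() as i64;
+//     }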
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2f64")]
+        fn _svldnt1_f64(pg: svbool2_t, base: *const f64) -> svfloat64_t;
+    }
+    _svldnt1_f64(pg.into(), base)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv16i8")]
+        fn _svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t;
+    }
+    _svldnt1_s8(pg, base)
+}
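+// Note on the predicate argument: `svbool_t` carries one predicate bit per
+// byte lane, so the 8-bit forms pass `pg` through unchanged, while the 16-,
+// 32- and 64-bit forms convert it with `.into()` to the `svbool8_t`/
+// `svbool4_t`/`svbool2_t` views expected by the corresponding LLVM intrinsics.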
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv8i16")]
+        fn _svldnt1_s16(pg: svbool8_t, base: *const i16) -> svint16_t;
+    }
+    _svldnt1_s16(pg.into(), base)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv4i32")]
+        fn _svldnt1_s32(pg: svbool4_t, base: *const i32) -> svint32_t;
+    }
+    _svldnt1_s32(pg.into(), base)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ldnt1.nxv2i64")]
+        fn _svldnt1_s64(pg: svbool2_t, base: *const i64) -> svint64_t;
+    }
+    _svldnt1_s64(pg.into(), base)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t {
+    svldnt1_s8(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t {
+    svldnt1_s16(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t {
+    svldnt1_s32(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t {
+    svldnt1_s64(pg, base.as_signed()).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svfloat32_t {
+    svldnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svfloat64_t {
+    svldnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_t {
+    svldnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint16_t {
+    svldnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint32_t {
+    svldnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint64_t {
+    svldnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8_t {
+    svldnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuint16_t {
+    svldnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuint32_t {
+    svldnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuint64_t {
+    svldnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize))
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw))]
+pub fn svlen_f32(_op: svfloat32_t) -> u64 {
+    svcntw()
+}
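+// Note: the `svlen_*` intrinsics ignore the value of their argument; it only
+// fixes the element width, so `svlen_f32(v)` simply returns `svcntw()`. This
+// is convenient for generic buffer sizing, e.g. (illustrative only):
+//
+//     let elems = svlen_f32(v);  // lanes in one full vector
+//     let bytes = elems * 4;     // bytes needed to spill that vector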
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd))]
+pub fn svlen_f64(_op: svfloat64_t) -> u64 {
+    svcntd()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl))]
+pub fn svlen_s8(_op: svint8_t) -> u64 {
+    svcntb()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth))]
+pub fn svlen_s16(_op: svint16_t) -> u64 {
+    svcnth()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw))]
+pub fn svlen_s32(_op: svint32_t) -> u64 {
+    svcntw()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd))]
+pub fn svlen_s64(_op: svint64_t) -> u64 {
+    svcntd()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdvl))]
+pub fn svlen_u8(_op: svuint8_t) -> u64 {
+    svcntb()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cnth))]
+pub fn svlen_u16(_op: svuint16_t) -> u64 {
+    svcnth()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntw))]
+pub fn svlen_u32(_op: svuint32_t) -> u64 {
+    svcntw()
+}
+#[doc = "Count the number of elements in a full vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(cntd))]
+pub fn svlen_u64(_op: svuint64_t) -> u64 {
+    svcntd()
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv16i8")]
+        fn _svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svlsl_s8_m(pg, op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svlsl_s8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svlsl_s8_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svlsl_s8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svlsl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svlsl_s8_z(pg, op1, svdup_n_u8(op2))
+}
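+// Hedged note on the suffixes used by the shift intrinsics (the standard ACLE
+// predication convention): `_m` merges, so inactive lanes keep the value of
+// `op1`; `_z` zeroes inactive lanes (implemented above by selecting between
+// `op1` and zero before the merging form); `_x` leaves inactive lanes
+// unspecified, which is why it can simply reuse the merging form. The `_n_`
+// variants take a scalar `op2` and broadcast it with the matching `svdup_n_*`.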
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv8i16")]
+        fn _svlsl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svlsl_s16_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svlsl_s16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svlsl_s16_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svlsl_s16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svlsl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svlsl_s16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv4i32")]
+        fn _svlsl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svlsl_s32_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svlsl_s32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svlsl_s32_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svlsl_s32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svlsl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svlsl_s32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsl.nxv2i64")]
+        fn _svlsl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svlsl_s64_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svlsl_s64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svlsl_s64_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svlsl_s64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svlsl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svlsl_s64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svlsl_s8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsl_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svlsl_u8_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsl_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svlsl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsl_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svlsl_s16_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsl_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svlsl_u16_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsl_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svlsl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsl_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svlsl_s32_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsl_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svlsl_u32_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsl_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svlsl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsl_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svlsl_s64_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsl_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svlsl_u64_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsl_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svlsl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsl_u64_z(pg, op1, svdup_n_u64(op2))
+}
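+// The `_wide` shift variants below take the per-element shift amounts as a 64-bit vector
+// (`svuint64_t`) regardless of the element width of `op1`, binding to the
+// `llvm.aarch64.sve.lsl.wide` intrinsics; their `_n_` forms accordingly splat a `u64`
+// shift amount with `svdup_n_u64`.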
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsl.wide.nxv16i8"
+        )]
+        fn _svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
+    }
+    unsafe { _svlsl_wide_s8_m(pg, op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svlsl_wide_s8_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    svlsl_wide_s8_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svlsl_wide_s8_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t {
+    svlsl_wide_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t {
+    svlsl_wide_s8_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsl.wide.nxv8i16"
+        )]
+        fn _svlsl_wide_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
+    }
+    unsafe { _svlsl_wide_s16_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svlsl_wide_s16_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    svlsl_wide_s16_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svlsl_wide_s16_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint16_t {
+    svlsl_wide_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t {
+    svlsl_wide_s16_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsl.wide.nxv4i32"
+        )]
+        fn _svlsl_wide_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svlsl_wide_s32_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svlsl_wide_s32_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    svlsl_wide_s32_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svlsl_wide_s32_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint32_t {
+    svlsl_wide_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t {
+    svlsl_wide_s32_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    unsafe { svlsl_wide_s8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsl_wide_u8_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    svlsl_wide_u8_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsl_wide_u8_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    svlsl_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsl_wide_u8_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    unsafe { svlsl_wide_s16_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsl_wide_u16_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    svlsl_wide_u16_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsl_wide_u16_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    svlsl_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsl_wide_u16_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svlsl_wide_s32_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsl_wide_u32_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    svlsl_wide_u32_m(pg, op1, op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsl_wide_u32_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    svlsl_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Logical shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsl))]
+pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsl_wide_u32_z(pg, op1, svdup_n_u64(op2))
+}
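+// The logical-shift-right intrinsics below mirror the `svlsl` definitions above, binding
+// to `llvm.aarch64.sve.lsr` / `llvm.aarch64.sve.lsr.wide`; because the LLVM declarations
+// here are written in terms of the signed vector types, the unsigned arguments are
+// converted with `as_signed` and the result recovered with `as_unsigned`.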
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv16i8")]
+        fn _svlsr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svlsr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsr_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svlsr_u8_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsr_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svlsr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svlsr_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv8i16")]
+        fn _svlsr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svlsr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsr_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svlsr_u16_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsr_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svlsr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svlsr_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv4i32")]
+        fn _svlsr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svlsr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svlsr_u32_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svlsr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svlsr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.lsr.nxv2i64")]
+        fn _svlsr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svlsr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svlsr_u64_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svlsr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svlsr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsr.wide.nxv16i8"
+        )]
+        fn _svlsr_wide_u8_m(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svint8_t;
+    }
+    unsafe { _svlsr_wide_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsr_wide_u8_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    svlsr_wide_u8_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsr_wide_u8_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8_t {
+    svlsr_wide_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t {
+    svlsr_wide_u8_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsr.wide.nxv8i16"
+        )]
+        fn _svlsr_wide_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint64_t) -> svint16_t;
+    }
+    unsafe { _svlsr_wide_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsr_wide_u16_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    svlsr_wide_u16_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsr_wide_u16_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuint16_t {
+    svlsr_wide_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t {
+    svlsr_wide_u16_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.lsr.wide.nxv4i32"
+        )]
+        fn _svlsr_wide_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svlsr_wide_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsr_wide_u32_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    svlsr_wide_u32_m(pg, op1, op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsr_wide_u32_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuint32_t {
+    svlsr_wide_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Logical shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(lsr))]
+pub fn svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t {
+    svlsr_wide_u32_z(pg, op1, svdup_n_u64(op2))
+}
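+// The multiply-add intrinsics below follow the same `_m`/`_x`/`_z`/`_n_` scheme as the
+// shifts: `_m` binds to `llvm.aarch64.sve.fmad` (floating-point) or
+// `llvm.aarch64.sve.mad` (integer), `_x` delegates to `_m`, `_z` zeroes the inactive
+// lanes of `op1` via `svsel_*` before calling `_m`, and `_n_` splats the scalar `op3`
+// with `svdup_n_*`.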
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv4f32")]
+        fn _svmad_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmad_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmad_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmad_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmad_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmad_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmad.nxv2f64")]
+        fn _svmad_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmad_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmad_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmad_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmad))]
+pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv16i8")]
+        fn _svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmad_s8_m(pg, op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmad_s8_m(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmad_s8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmad_s8_x(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmad_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmad_s8_z(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv8i16")]
+        fn _svmad_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
+            -> svint16_t;
+    }
+    unsafe { _svmad_s16_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmad_s16_m(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmad_s16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmad_s16_x(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmad_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmad_s16_z(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv4i32")]
+        fn _svmad_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
+            -> svint32_t;
+    }
+    unsafe { _svmad_s32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmad_s32_m(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmad_s32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmad_s32_x(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmad_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmad_s32_z(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mad.nxv2i64")]
+        fn _svmad_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
+            -> svint64_t;
+    }
+    unsafe { _svmad_s64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmad_s64_m(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmad_s64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmad_s64_x(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmad_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmad_s64_z(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
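+    // Integer MAD is bit-identical for signed and unsigned elements, so the unsigned forms reinterpret to the signed intrinsic and back.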
+    unsafe { svmad_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmad_u8_m(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmad_u8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmad_u8_x(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmad_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmad_u8_z(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svmad_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmad_u16_m(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmad_u16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmad_u16_x(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmad_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmad_u16_z(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svmad_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmad_u32_m(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmad_u32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmad_u32_x(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmad_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmad_u32_z(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svmad_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmad_u64_m(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmad_u64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmad_u64_x(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmad_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mad))]
+pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmad_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
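+// Illustrative usage sketch for the `svmad` family (comments only, not generated code;
+// it assumes `svptrue_b32` is available elsewhere in this crate):
+//
+//     let pg = svptrue_b32();             // all lanes active
+//     let r = svmad_s32_m(pg, a, b, c);   // lane-wise a * b + c (inactive lanes would keep a)
+//     let r = svmad_n_s32_x(pg, a, b, 7); // same, with the addend supplied as a scalar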
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
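+    // `pg.into()` converts the svbool_t predicate into the svbool4_t form expected by the 32-bit-element LLVM intrinsic.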
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv4f32")]
+        fn _svmax_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmax_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmax_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmax_f32_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmax_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmax_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmax_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmax.nxv2f64")]
+        fn _svmax_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmax_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmax_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmax_f64_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmax_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmax_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmax))]
+pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmax_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
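+    // With 8-bit elements the predicate already has one bit per lane, so `pg` is passed through unconverted.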
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv16i8")]
+        fn _svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmax_s8_m(pg, op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmax_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmax_s8_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmax_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmax_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmax_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv8i16")]
+        fn _svmax_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmax_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmax_s16_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmax_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmax_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv4i32")]
+        fn _svmax_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmax_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmax_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmax_s32_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmax_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmax_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmax_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smax.nxv2i64")]
+        fn _svmax_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmax_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmax_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmax_s64_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmax_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmax_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smax))]
+pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmax_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv16i8")]
+        fn _svmax_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmax_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmax_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmax_u8_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmax_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmax_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmax_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv8i16")]
+        fn _svmax_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmax_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmax_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmax_u16_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmax_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmax_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmax_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv4i32")]
+        fn _svmax_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmax_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmax_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmax_u32_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmax_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmax_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmax_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umax.nxv2i64")]
+        fn _svmax_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmax_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmax_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmax_u64_m(pg, op1, op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmax_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmax_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Maximum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umax))]
+pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmax_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
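+    // fmaxnm uses IEEE 754 maxNum semantics: if exactly one operand is a quiet NaN, the numeric operand is returned, unlike fmax.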
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv4f32")]
+        fn _svmaxnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmaxnm_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmaxnm_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmaxnm_f32_m(pg, op1, op2)
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmaxnm_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmaxnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmaxnm_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxnm.nxv2f64")]
+        fn _svmaxnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmaxnm_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmaxnm_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmaxnm_f64_m(pg, op1, op2)
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmaxnm_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmaxnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Maximum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnm))]
+pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmaxnm_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Maximum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnmv))]
+pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
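+    // Reduces the active lanes of `op` to a single scalar; inactive lanes do not contribute.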
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmv.nxv4f32"
+        )]
+        fn _svmaxnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svmaxnmv_f32(pg.into(), op) }
+}
+#[doc = "Maximum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxnmv))]
+pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmv.nxv2f64"
+        )]
+        fn _svmaxnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svmaxnmv_f64(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxv))]
+pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv4f32")]
+        fn _svmaxv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svmaxv_f32(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmaxv))]
+pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxv.nxv2f64")]
+        fn _svmaxv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svmaxv_f64(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smaxv))]
+pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv16i8")]
+        fn _svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svmaxv_s8(pg, op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smaxv))]
+pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv8i16")]
+        fn _svmaxv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svmaxv_s16(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smaxv))]
+pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv4i32")]
+        fn _svmaxv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svmaxv_s32(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smaxv))]
+pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxv.nxv2i64")]
+        fn _svmaxv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svmaxv_s64(pg.into(), op) }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umaxv))]
+pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv16i8")]
+        fn _svmaxv_u8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svmaxv_u8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umaxv))]
+pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv8i16")]
+        fn _svmaxv_u16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svmaxv_u16(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umaxv))]
+pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv4i32")]
+        fn _svmaxv_u32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svmaxv_u32(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umaxv))]
+pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxv.nxv2i64")]
+        fn _svmaxv_u64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svmaxv_u64(pg.into(), op.as_signed()).as_unsigned() }
+}
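+// Illustrative sketch of a reduction to scalar (comments only; assumes `svptrue_b8` is
+// available elsewhere in this crate):
+//
+//     let m = svmaxv_u8(svptrue_b8(), svdup_n_u8(42)); // m == 42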
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
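+    // The svmin family mirrors svmax above, lowering to fmin/smin/umin.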
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv4f32")]
+        fn _svmin_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmin_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmin_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmin_f32_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmin_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmin_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmin_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmin.nxv2f64")]
+        fn _svmin_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmin_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmin_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmin_f64_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmin_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmin_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmin))]
+pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmin_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv16i8")]
+        fn _svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmin_s8_m(pg, op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmin_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmin_s8_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmin_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmin_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmin_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv8i16")]
+        fn _svmin_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmin_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmin_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmin_s16_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmin_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmin_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmin_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv4i32")]
+        fn _svmin_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmin_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmin_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmin_s32_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmin_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmin_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmin_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smin.nxv2i64")]
+        fn _svmin_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmin_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmin_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmin_s64_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmin_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmin_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smin))]
+pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmin_s64_z(pg, op1, svdup_n_s64(op2))
+}
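+// The unsigned variants below follow the same shape: the LLVM `umin`
+// intrinsics are declared over plain integer vectors, so the `svuint*_t`
+// arguments are reinterpreted with `as_signed()` and the result converted
+// back with `as_unsigned()`.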
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv16i8")]
+        fn _svmin_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmin_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmin_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmin_u8_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmin_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmin_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmin_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv8i16")]
+        fn _svmin_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmin_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmin_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmin_u16_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmin_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmin_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmin_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv4i32")]
+        fn _svmin_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmin_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmin_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmin_u32_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmin_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmin_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmin_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umin.nxv2i64")]
+        fn _svmin_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmin_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmin_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmin_u64_m(pg, op1, op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmin_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmin_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Minimum"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umin))]
+pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmin_u64_z(pg, op1, svdup_n_u64(op2))
+}
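+// `svminnm` lowers to FMINNM, which follows the IEEE 754 minNum rule: if one
+// operand is a quiet NaN and the other is numeric, the numeric operand is
+// returned rather than the NaN.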
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv4f32")]
+        fn _svminnm_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svminnm_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svminnm_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svminnm_f32_m(pg, op1, op2)
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svminnm_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svminnm_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svminnm_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminnm.nxv2f64")]
+        fn _svminnm_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svminnm_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svminnm_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svminnm_f64_m(pg, op1, op2)
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svminnm_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svminnm_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Minimum number"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnm))]
+pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svminnm_f64_z(pg, op1, svdup_n_f64(op2))
+}
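+// The `*v` intrinsics below are reductions: they fold the active lanes of a
+// single vector down to one scalar minimum instead of operating lane-wise.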
+#[doc = "Minimum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnmv))]
+pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fminnmv.nxv4f32"
+        )]
+        fn _svminnmv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svminnmv_f32(pg.into(), op) }
+}
+#[doc = "Minimum number reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminnmv))]
+pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fminnmv.nxv2f64"
+        )]
+        fn _svminnmv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svminnmv_f64(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminv))]
+pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv4f32")]
+        fn _svminv_f32(pg: svbool4_t, op: svfloat32_t) -> f32;
+    }
+    unsafe { _svminv_f32(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fminv))]
+pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminv.nxv2f64")]
+        fn _svminv_f64(pg: svbool2_t, op: svfloat64_t) -> f64;
+    }
+    unsafe { _svminv_f64(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv16i8")]
+        fn _svminv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svminv_s8(pg, op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv8i16")]
+        fn _svminv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svminv_s16(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv4i32")]
+        fn _svminv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svminv_s32(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sminv))]
+pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminv.nxv2i64")]
+        fn _svminv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svminv_s64(pg.into(), op) }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv16i8")]
+        fn _svminv_u8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svminv_u8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv8i16")]
+        fn _svminv_u16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svminv_u16(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv4i32")]
+        fn _svminv_u32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svminv_u32(pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uminv))]
+pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminv.nxv2i64")]
+        fn _svminv_u64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svminv_u64(pg.into(), op.as_signed()).as_unsigned() }
+}
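+// `svmla` is a multiply-add with the addend first: each active lane computes
+// `op1 + op2 * op3` (fused for the floating-point forms).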
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv4f32")]
+        fn _svmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmla_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmla_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmla.nxv2f64")]
+        fn _svmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmla_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmla_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla))]
+pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv16i8")]
+        fn _svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmla_s8_m(pg, op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_m(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmla_s8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_x(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmla_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmla_s8_z(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv8i16")]
+        fn _svmla_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
+            -> svint16_t;
+    }
+    unsafe { _svmla_s16_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_m(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmla_s16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_x(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmla_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmla_s16_z(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv4i32")]
+        fn _svmla_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
+            -> svint32_t;
+    }
+    unsafe { _svmla_s32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_m(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmla_s32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_x(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmla_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmla_s32_z(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mla.nxv2i64")]
+        fn _svmla_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
+            -> svint64_t;
+    }
+    unsafe { _svmla_s64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_m(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmla_s64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_x(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmla_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmla_s64_z(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svmla_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_m(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmla_u8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_x(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmla_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmla_u8_z(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svmla_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_m(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmla_u16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_x(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmla_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmla_u16_z(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svmla_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_m(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmla_u32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_x(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmla_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmla_u32_z(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svmla_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_m(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmla_u64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_x(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmla_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mla))]
+pub fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmla_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
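+// The `_lane` forms take the lane index as a const generic, checked at
+// compile time with `static_assert_range!` (0..=3 for f32, 0..=1 for f64),
+// and pass it through to the LLVM intrinsic as an immediate.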
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+pub fn svmla_lane_f32<const IMM_INDEX: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv4f32"
+        )]
+        fn _svmla_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            IMM_INDEX: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmla_lane_f32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))]
+pub fn svmla_lane_f64<const IMM_INDEX: i32>(
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmla.lane.nxv2f64"
+        )]
+        fn _svmla_lane_f64(
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            IMM_INDEX: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmla_lane_f64(op1, op2, op3, IMM_INDEX) }
+}
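+// `svmls` is the multiply-subtract counterpart, minuend first: each active
+// lane computes `op1 - op2 * op3`.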
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv4f32")]
+        fn _svmls_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmls_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmls_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmls.nxv2f64")]
+        fn _svmls_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmls_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmls_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmls_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls))]
+pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmls_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv16i8")]
+        fn _svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmls_s8_m(pg, op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmls_s8_m(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmls_s8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmls_s8_x(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmls_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmls_s8_z(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv8i16")]
+        fn _svmls_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
+            -> svint16_t;
+    }
+    unsafe { _svmls_s16_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmls_s16_m(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmls_s16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmls_s16_x(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmls_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmls_s16_z(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv4i32")]
+        fn _svmls_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
+            -> svint32_t;
+    }
+    unsafe { _svmls_s32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmls_s32_m(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmls_s32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmls_s32_x(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmls_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmls_s32_z(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mls.nxv2i64")]
+        fn _svmls_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
+            -> svint64_t;
+    }
+    unsafe { _svmls_s64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmls_s64_m(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmls_s64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmls_s64_x(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmls_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmls_s64_z(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svmls_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmls_u8_m(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmls_u8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmls_u8_x(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmls_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmls_u8_z(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svmls_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmls_u16_m(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmls_u16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmls_u16_x(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmls_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmls_u16_z(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svmls_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmls_u32_m(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmls_u32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmls_u32_x(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmls_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmls_u32_z(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svmls_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmls_u64_m(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmls_u64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmls_u64_x(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmls_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mls))]
+pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmls_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
+pub fn svmls_lane_f32<const IMM_INDEX: i32>(
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmls.lane.nxv4f32"
+        )]
+        fn _svmls_lane_f32(
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+            IMM_INDEX: i32,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmls_lane_f32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))]
+pub fn svmls_lane_f64<const IMM_INDEX: i32>(
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmls.lane.nxv2f64"
+        )]
+        fn _svmls_lane_f64(
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+            IMM_INDEX: i32,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmls_lane_f64(op1, op2, op3, IMM_INDEX) }
+}
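+// Illustrative sketch (hand-written, not part of the generated bindings): the
+// `_m`, `_x` and `_z` suffixes on the svmls intrinsics above differ only in how
+// inactive lanes are handled. A minimal sketch, assuming an all-true predicate
+// helper such as `svptrue_b32()` is available elsewhere in this module:
+//
+//     let pg = svptrue_b32();
+//     let m = svmls_f32_m(pg, op1, op2, op3); // inactive lanes keep op1
+//     let x = svmls_f32_x(pg, op1, op2, op3); // inactive lanes unspecified (merging in this implementation)
+//     let z = svmls_f32_z(pg, op1, op2, op3); // inactive lanes zeroed
+//
+// For the `_lane` forms, `IMM_INDEX` selects the lane of `op3` within each
+// 128-bit segment and is checked at compile time by `static_assert_range!`.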
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f32mm")]
+#[cfg_attr(test, assert_instr(fmmla))]
+pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv4f32")]
+        fn _svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmmla_f32(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(fmmla))]
+pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmmla.nxv2f64")]
+        fn _svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmmla_f64(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(smmla))]
+pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smmla.nxv4i32")]
+        fn _svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svmmla_s32(op1, op2, op3) }
+}
+#[doc = "Matrix multiply-accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(ummla))]
+pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ummla.nxv4i32")]
+        fn _svmmla_u32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svmmla_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
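+// Illustrative note (hand-written, not part of the generated bindings): the
+// svmmla intrinsics above require an extra target feature on top of `sve`
+// (`f32mm`, `f64mm` or `i8mm`, per their `target_feature` attributes), so a
+// caller would typically guard them. A sketch, assuming std's aarch64 runtime
+// detection macro accepts these feature names:
+//
+//     if std::arch::is_aarch64_feature_detected!("i8mm") {
+//         // svmmla_s32 / svmmla_u32 may be used on this CPU.
+//     }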
+#[doc = "Move"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mov))]
+pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
+    svand_b_z(pg, op, op)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv4f32")]
+        fn _svmsb_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svmsb_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmsb_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmsb_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmsb_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svmsb_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmsb.nxv2f64")]
+        fn _svmsb_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svmsb_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmsb_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmsb_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmsb_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmsb))]
+pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svmsb_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv16i8")]
+        fn _svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmsb_s8_m(pg, op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmsb_s8_m(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmsb_s8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmsb_s8_x(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    svmsb_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svmsb_s8_z(pg, op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv8i16")]
+        fn _svmsb_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t, op3: svint16_t)
+            -> svint16_t;
+    }
+    unsafe { _svmsb_s16_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmsb_s16_m(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmsb_s16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmsb_s16_x(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    svmsb_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svmsb_s16_z(pg, op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv4i32")]
+        fn _svmsb_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t, op3: svint32_t)
+            -> svint32_t;
+    }
+    unsafe { _svmsb_s32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmsb_s32_m(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmsb_s32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmsb_s32_x(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    svmsb_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svmsb_s32_z(pg, op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.msb.nxv2i64")]
+        fn _svmsb_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t, op3: svint64_t)
+            -> svint64_t;
+    }
+    unsafe { _svmsb_s64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmsb_s64_m(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmsb_s64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmsb_s64_x(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    svmsb_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svmsb_s64_z(pg, op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svmsb_s8_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmsb_u8_m(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmsb_u8_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmsb_u8_x(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    svmsb_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svmsb_u8_z(pg, op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svmsb_s16_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmsb_u16_m(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmsb_u16_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmsb_u16_x(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    svmsb_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svmsb_u16_z(pg, op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svmsb_s32_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmsb_u32_m(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmsb_u32_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmsb_u32_x(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    svmsb_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svmsb_u32_z(pg, op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svmsb_s64_m(pg, op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmsb_u64_m(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmsb_u64_m(pg, op1, op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmsb_u64_x(pg, op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    svmsb_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2, op3)
+}
+#[doc = "Multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(msb))]
+pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svmsb_u64_z(pg, op1, op2, svdup_n_u64(op3))
+}
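+// Illustrative note (hand-written, not part of the generated bindings): svmls
+// and svmsb perform the same fused multiply-subtract with the operands in
+// different roles, mirroring the MLS and MSB instructions:
+//
+//     svmls_*_m(pg, op1, op2, op3)  computes  op1 - op2 * op3  (op1 is the minuend)
+//     svmsb_*_m(pg, op1, op2, op3)  computes  op3 - op1 * op2  (op1 is a multiplicand)
+//
+// The `_x` and `_z` forms handle inactive lanes as described for svmls above.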
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv4f32")]
+        fn _svmul_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmul_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmul_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmul_f32_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmul_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmul_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmul_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmul.nxv2f64")]
+        fn _svmul_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmul_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmul_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmul_f64_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmul_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmul_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmul))]
+pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmul_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv16i8")]
+        fn _svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmul_s8_m(pg, op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmul_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmul_s8_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmul_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmul_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmul_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv8i16")]
+        fn _svmul_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmul_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmul_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmul_s16_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmul_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmul_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmul_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv4i32")]
+        fn _svmul_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmul_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmul_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmul_s32_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmul_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmul_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmul_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.mul.nxv2i64")]
+        fn _svmul_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmul_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmul_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmul_s64_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmul_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmul_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmul_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
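+    // The low half of a multiply is the same for signed and unsigned operands, so the
+    // unsigned form bit-casts to the signed intrinsic and back.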
+    unsafe { svmul_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmul_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmul_u8_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmul_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmul_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmul_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svmul_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmul_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmul_u16_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmul_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmul_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmul_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svmul_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmul_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmul_u32_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmul_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmul_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmul_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svmul_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmul_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmul_u64_m(pg, op1, op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmul_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmul_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(mul))]
+pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmul_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv16i8")]
+        fn _svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmulh_s8_m(pg, op1, op2) }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmulh_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmulh_s8_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmulh_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmulh_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svmulh_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv8i16")]
+        fn _svmulh_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmulh_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmulh_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmulh_s16_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmulh_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmulh_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svmulh_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv4i32")]
+        fn _svmulh_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmulh_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmulh_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmulh_s32_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmulh_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmulh_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svmulh_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smulh.nxv2i64")]
+        fn _svmulh_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmulh_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmulh_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmulh_s64_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmulh_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmulh_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(smulh))]
+pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svmulh_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
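+    // Unlike the low-half multiply, the high half depends on signedness, so this
+    // binds the dedicated umulh builtin rather than reusing smulh.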
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv16i8")]
+        fn _svmulh_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmulh_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmulh_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmulh_u8_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmulh_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmulh_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svmulh_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv8i16")]
+        fn _svmulh_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmulh_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmulh_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmulh_u16_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmulh_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmulh_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svmulh_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv4i32")]
+        fn _svmulh_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmulh_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmulh_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmulh_u32_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmulh_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmulh_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svmulh_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umulh.nxv2i64")]
+        fn _svmulh_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmulh_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmulh_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmulh_u64_m(pg, op1, op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmulh_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmulh_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Multiply, returning high-half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(umulh))]
+pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svmulh_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
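+    // fmulx behaves like fmul except that infinity times zero returns 2.0 (with the
+    // appropriate sign) instead of NaN, as the (∞×0=2) doc note indicates.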
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv4f32")]
+        fn _svmulx_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmulx_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmulx_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmulx_f32_m(pg, op1, op2)
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmulx_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmulx_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svmulx_f32_z(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmulx.nxv2f64")]
+        fn _svmulx_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmulx_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmulx_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmulx_f64_m(pg, op1, op2)
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmulx_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmulx_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Multiply extended (∞×0=2)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fmulx))]
+pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svmulx_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Bitwise NAND"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(nand))]
+pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
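+    // The predicate form is zeroing: lanes inactive in `pg` are cleared in the
+    // result, hence the `.z` in the builtin name.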
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nand.z.nxv16i1")]
+        fn _svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svnand_b_z(pg, op1, op2) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
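+    // Unary merging form: the builtin takes an explicit `inactive` vector that
+    // supplies the result for lanes where `pg` is false.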
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv4f32")]
+        fn _svneg_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svneg_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
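+    // _x may return anything in inactive lanes; passing `op` as `inactive` simply
+    // leaves those lanes unchanged, which is one valid choice.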
+    svneg_f32_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
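+    // _z zeroes inactive lanes by supplying a zero vector as `inactive`.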
+    svneg_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fneg.nxv2f64")]
+        fn _svneg_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svneg_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svneg_f64_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fneg))]
+pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svneg_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv16i8")]
+        fn _svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svneg_s8_m(inactive, pg, op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svneg_s8_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svneg_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv8i16")]
+        fn _svneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svneg_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svneg_s16_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svneg_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv4i32")]
+        fn _svneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svneg_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svneg_s32_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svneg_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.neg.nxv2i64")]
+        fn _svneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svneg_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svneg_s64_m(op, pg, op)
+}
+#[doc = "Negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(neg))]
+pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svneg_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv4f32")]
+        fn _svnmad_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svnmad_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmad_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmad_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmad_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmad_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmad_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmad.nxv2f64")]
+        fn _svnmad_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svnmad_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmad_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmad_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmad_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmad_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-add, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmad))]
+pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmad_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
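+// Lane-wise semantics for active lanes: svnmad above computes -(op1 * op2 + op3)
+// ("multiplicand first"), while svnmla below computes -(op1 + op2 * op3) ("addend first").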
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv4f32")]
+        fn _svnmla_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svnmla_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmla_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmla_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmla_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmla_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmla_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmla.nxv2f64")]
+        fn _svnmla_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svnmla_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmla_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmla_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmla_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmla_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmla))]
+pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmla_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
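+// For active lanes, svnmls computes op2 * op3 - op1, i.e. the negation of the
+// multiply-subtract with op1 as the minuend.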
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv4f32")]
+        fn _svnmls_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svnmls_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmls_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmls_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmls_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmls_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmls_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmls.nxv2f64")]
+        fn _svnmls_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svnmls_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmls_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmls_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmls_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmls_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmls))]
+pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmls_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
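+// For active lanes, svnmsb computes op1 * op2 - op3, i.e. the negation of the
+// multiply-subtract with the multiplicand first.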
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f32_m(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv4f32")]
+        fn _svnmsb_f32_m(
+            pg: svbool4_t,
+            op1: svfloat32_t,
+            op2: svfloat32_t,
+            op3: svfloat32_t,
+        ) -> svfloat32_t;
+    }
+    unsafe { _svnmsb_f32_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmsb_f32_m(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f32_x(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmsb_f32_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmsb_f32_x(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f32_z(
+    pg: svbool_t,
+    op1: svfloat32_t,
+    op2: svfloat32_t,
+    op3: svfloat32_t,
+) -> svfloat32_t {
+    svnmsb_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) -> svfloat32_t {
+    svnmsb_f32_z(pg, op1, op2, svdup_n_f32(op3))
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f64_m(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fnmsb.nxv2f64")]
+        fn _svnmsb_f64_m(
+            pg: svbool2_t,
+            op1: svfloat64_t,
+            op2: svfloat64_t,
+            op3: svfloat64_t,
+        ) -> svfloat64_t;
+    }
+    unsafe { _svnmsb_f64_m(pg.into(), op1, op2, op3) }
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmsb_f64_m(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f64_x(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmsb_f64_m(pg, op1, op2, op3)
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmsb_f64_x(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_f64_z(
+    pg: svbool_t,
+    op1: svfloat64_t,
+    op2: svfloat64_t,
+    op3: svfloat64_t,
+) -> svfloat64_t {
+    svnmsb_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2, op3)
+}
+#[doc = "Negated multiply-subtract, multiplicand first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fnmsb))]
+pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) -> svfloat64_t {
+    svnmsb_f64_z(pg, op1, op2, svdup_n_f64(op3))
+}
+#[doc = "Bitwise NOR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(nor))]
+pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nor.z.nxv16i1")]
+        fn _svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svnor_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t {
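+    // Active lanes of `pg` are 1, so EOR with `pg` inverts `op` in those lanes; the `_z`
+    // form of sveor then zeroes the inactive lanes.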
+    sveor_b_z(pg, op, pg)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv16i8")]
+        fn _svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
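+    // 8-bit elements use the full-width predicate, so `pg` is passed through without a
+    // predicate-width conversion.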
+    unsafe { _svnot_s8_m(inactive, pg, op) }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svnot_s8_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svnot_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv8i16")]
+        fn _svnot_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svnot_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svnot_s16_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svnot_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv4i32")]
+        fn _svnot_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svnot_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svnot_s32_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svnot_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.not.nxv2i64")]
+        fn _svnot_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svnot_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svnot_s64_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svnot_s64_m(svdup_n_s64(0), pg, op)
+}
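+// The unsigned variants below reinterpret their arguments as the signed vector types
+// (`as_signed`/`as_unsigned`) and reuse the signed intrinsics, since bitwise inversion is
+// independent of signedness.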
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    unsafe { svnot_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svnot_u8_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svnot_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svnot_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svnot_u16_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svnot_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svnot_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svnot_u32_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svnot_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svnot_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svnot_u64_m(op, pg, op)
+}
+#[doc = "Bitwise invert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(not))]
+pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svnot_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Bitwise inclusive OR, inverting second argument"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orn))]
+pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orn.z.nxv16i1")]
+        fn _svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svorn_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.z.nxv16i1")]
+        fn _svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svorr_b_z(pg, op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv16i8")]
+        fn _svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svorr_s8_m(pg, op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svorr_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svorr_s8_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svorr_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svorr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svorr_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv8i16")]
+        fn _svorr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svorr_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svorr_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svorr_s16_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svorr_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svorr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svorr_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv4i32")]
+        fn _svorr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svorr_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svorr_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svorr_s32_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svorr_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svorr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svorr_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orr.nxv2i64")]
+        fn _svorr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svorr_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svorr_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svorr_s64_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svorr_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svorr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svorr_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svorr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svorr_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svorr_u8_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svorr_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svorr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svorr_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svorr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svorr_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svorr_u16_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svorr_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svorr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svorr_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svorr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svorr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svorr_u32_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svorr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svorr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svorr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svorr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svorr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svorr_u64_m(pg, op1, op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svorr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svorr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Bitwise inclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orr))]
+pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svorr_u64_z(pg, op1, svdup_n_u64(op2))
+}
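+// Reduction forms: svorv ORs together all active lanes of `op` and returns the result as a
+// scalar; inactive lanes do not contribute.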
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv16i8")]
+        fn _svorv_s8(pg: svbool_t, op: svint8_t) -> i8;
+    }
+    unsafe { _svorv_s8(pg, op) }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv8i16")]
+        fn _svorv_s16(pg: svbool8_t, op: svint16_t) -> i16;
+    }
+    unsafe { _svorv_s16(pg.into(), op) }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv4i32")]
+        fn _svorv_s32(pg: svbool4_t, op: svint32_t) -> i32;
+    }
+    unsafe { _svorv_s32(pg.into(), op) }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.orv.nxv2i64")]
+        fn _svorv_s64(pg: svbool2_t, op: svint64_t) -> i64;
+    }
+    unsafe { _svorv_s64(pg.into(), op) }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 {
+    unsafe { svorv_s8(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 {
+    unsafe { svorv_s16(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 {
+    unsafe { svorv_s32(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise inclusive OR reduction to scalar"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(orv))]
+pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 {
+    unsafe { svorv_s64(pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Set all predicate elements to false"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pfalse))]
+pub fn svpfalse_b() -> svbool_t {
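+    // Broadcast sixteen `false` lanes to build an all-false predicate; as asserted above,
+    // this is expected to lower to a single `pfalse`.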
+    svdupq_n_b8(
+        false, false, false, false, false, false, false, false, false, false, false, false, false,
+        false, false, false,
+    )
+}
+#[doc = "Set the first active predicate element to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pfirst))]
+pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pfirst.nxv16i1")]
+        fn _svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svpfirst_b(pg, op) }
+}
+#[doc = "Find next active predicate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pnext))]
+pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv16i1")]
+        fn _svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svpnext_b8(pg, op) }
+}
+#[doc = "Find next active predicate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pnext))]
+pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv8i1")]
+        fn _svpnext_b16(pg: svbool8_t, op: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svpnext_b16(pg.into(), op.into()).into() }
+}
+#[doc = "Find next active predicate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pnext))]
+pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv4i1")]
+        fn _svpnext_b32(pg: svbool4_t, op: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svpnext_b32(pg.into(), op.into()).into() }
+}
+#[doc = "Find next active predicate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(pnext))]
+pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pnext.nxv2i1")]
+        fn _svpnext_b64(pg: svbool2_t, op: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svpnext_b64(pg.into(), op.into()).into() }
+}
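+// Illustrative sketch (helper name hypothetical): the usual way to walk the
+// active lanes of a predicate one at a time is to start from `svpfalse_b` and
+// repeatedly call `svpnext_b8`, stopping once no active lane remains.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn example_count_active_lanes(pg: svbool_t) -> u32 {
+    let mut lanes = 0;
+    let mut cursor = svpfalse_b();
+    loop {
+        // Advance `cursor` to the next active lane of `pg` (all-false when exhausted).
+        cursor = svpnext_b8(pg, cursor);
+        if !svptest_any(pg, cursor) {
+            return lanes;
+        }
+        lanes += 1;
+    }
+}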
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv16i1")]
+        fn _svprfb(pg: svbool_t, base: *const crate::ffi::c_void, op: svprfop);
+    }
+    _svprfb(pg, base as *const crate::ffi::c_void, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv8i1")]
+        fn _svprfh(pg: svbool8_t, base: *const crate::ffi::c_void, op: svprfop);
+    }
+    _svprfh(pg.into(), base as *const crate::ffi::c_void, OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv4i1")]
+        fn _svprfw(pg: svbool4_t, base: *const crate::ffi::c_void, op: svprfop);
+    }
+    _svprfw(pg.into(), base as *const crate::ffi::c_void, OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd<const OP: svprfop, T>(pg: svbool_t, base: *const T) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.prf.nxv2i1")]
+        fn _svprfd(pg: svbool2_t, base: *const crate::ffi::c_void, op: svprfop);
+    }
+    _svprfd(pg.into(), base as *const crate::ffi::c_void, OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_gather_s32offset<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    offsets: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svprfb_gather_s32offset(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            offsets: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfb_gather_s32offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_gather_s32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svprfh_gather_s32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfh_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_gather_s32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svprfw_gather_s32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfw_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd_gather_s32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.sxtw.index.nxv4i32"
+        )]
+        fn _svprfd_gather_s32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfd_gather_s32index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_gather_s64offset<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    offsets: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.index.nxv2i64"
+        )]
+        fn _svprfb_gather_s64offset(
+            pg: svbool2_t,
+            base: *const crate::ffi::c_void,
+            offsets: svint64_t,
+            op: svprfop,
+        );
+    }
+    _svprfb_gather_s64offset(pg.into(), base as *const crate::ffi::c_void, offsets, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_gather_s64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.index.nxv2i64"
+        )]
+        fn _svprfh_gather_s64index(
+            pg: svbool2_t,
+            base: *const crate::ffi::c_void,
+            indices: svint64_t,
+            op: svprfop,
+        );
+    }
+    _svprfh_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_gather_s64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.index.nxv2i64"
+        )]
+        fn _svprfw_gather_s64index(
+            pg: svbool2_t,
+            base: *const crate::ffi::c_void,
+            indices: svint64_t,
+            op: svprfop,
+        );
+    }
+    _svprfw_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd_gather_s64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.index.nxv2i64"
+        )]
+        fn _svprfd_gather_s64index(
+            pg: svbool2_t,
+            base: *const crate::ffi::c_void,
+            indices: svint64_t,
+            op: svprfop,
+        );
+    }
+    _svprfd_gather_s64index(pg.into(), base as *const crate::ffi::c_void, indices, OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_gather_u32offset<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    offsets: svuint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svprfb_gather_u32offset(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            offsets: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfb_gather_u32offset(
+        pg.into(),
+        base as *const crate::ffi::c_void,
+        offsets.as_signed(),
+        OP,
+    )
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_gather_u32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svprfh_gather_u32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfh_gather_u32index(
+        pg.into(),
+        base as *const crate::ffi::c_void,
+        indices.as_signed(),
+        OP,
+    )
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_gather_u32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svprfw_gather_u32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfw_gather_u32index(
+        pg.into(),
+        base as *const crate::ffi::c_void,
+        indices.as_signed(),
+        OP,
+    )
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd_gather_u32index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.uxtw.index.nxv4i32"
+        )]
+        fn _svprfd_gather_u32index(
+            pg: svbool4_t,
+            base: *const crate::ffi::c_void,
+            indices: svint32_t,
+            op: svprfop,
+        );
+    }
+    _svprfd_gather_u32index(
+        pg.into(),
+        base as *const crate::ffi::c_void,
+        indices.as_signed(),
+        OP,
+    )
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_gather_u64offset<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    offsets: svuint64_t,
+) {
+    svprfb_gather_s64offset::<OP, T>(pg, base, offsets.as_signed())
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_gather_u64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint64_t,
+) {
+    svprfh_gather_s64index::<OP, T>(pg, base, indices.as_signed())
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_gather_u64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint64_t,
+) {
+    svprfw_gather_s64index::<OP, T>(pg, base, indices.as_signed())
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd_gather_u64index<const OP: svprfop, T>(
+    pg: svbool_t,
+    base: *const T,
+    indices: svuint64_t,
+) {
+    svprfd_gather_s64index::<OP, T>(pg, base, indices.as_signed())
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfb_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfb_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    _svprfb_gather_u32base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfh_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfh_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    _svprfh_gather_u32base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfw_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfw_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    _svprfw_gather_u32base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfd_gather_u32base<const OP: svprfop>(pg: svbool_t, bases: svuint32_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfd_gather_u32base(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    _svprfd_gather_u32base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfb_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfb_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    _svprfb_gather_u64base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfh_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfh_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    _svprfh_gather_u64base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfw_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfw_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    _svprfw_gather_u64base(pg.into(), bases.as_signed(), 0, OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfd_gather_u64base<const OP: svprfop>(pg: svbool_t, bases: svuint64_t) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfd_gather_u64base(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    _svprfd_gather_u64base(pg.into(), bases.as_signed(), 0, OP)
+}
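+// Illustrative sketch (helper name hypothetical): the `u64base` forms take a
+// vector of addresses rather than a pointer, so a single address can be
+// splatted with `svdup_n_u64` before prefetching. The provenance caveat from
+// the safety notes above applies to the integer-to-address conversion.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_prefetch_one_address(pg: svbool_t, ptr: *const u8) {
+    // Prefetch the same byte address for every active lane.
+    svprfb_gather_u64base::<{ svprfop::SV_PLDL1KEEP }>(pg, svdup_n_u64(ptr as u64));
+}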
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u32base]_offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfb_gather_u32base_offset<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfb_gather_u32base_offset(pg: svbool4_t, bases: svint32_t, offset: i64, op: svprfop);
+    }
+    _svprfb_gather_u32base_offset(pg.into(), bases.as_signed(), offset, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u32base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfh_gather_u32base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfh_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the halfword index by 2.
+    _svprfh_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u32base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfw_gather_u32base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfw_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the word index by 4.
+    _svprfw_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u32base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfd_gather_u32base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv4i32"
+        )]
+        fn _svprfd_gather_u32base_index(pg: svbool4_t, bases: svint32_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the doubleword index by 8.
+    _svprfd_gather_u32base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather[_u64base]_offset)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfb_gather_u64base_offset<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfb.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfb_gather_u64base_offset(pg: svbool2_t, bases: svint64_t, offset: i64, op: svprfop);
+    }
+    _svprfb_gather_u64base_offset(pg.into(), bases.as_signed(), offset, OP)
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather[_u64base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfh_gather_u64base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfh.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfh_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the halfword index by 2.
+    _svprfh_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(1), OP)
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather[_u64base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfw_gather_u64base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfw.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfw_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the word index by 4.
+    _svprfw_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(2), OP)
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather[_u64base]_index)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))]
+pub unsafe fn svprfd_gather_u64base_index<const OP: svprfop>(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.prfd.gather.scalar.offset.nxv2i64"
+        )]
+        fn _svprfd_gather_u64base_index(pg: svbool2_t, bases: svint64_t, index: i64, op: svprfop);
+    }
+    // The underlying `scalar.offset` intrinsic takes a byte offset, so scale the doubleword index by 8.
+    _svprfd_gather_u64base_index(pg.into(), bases.as_signed(), index.unchecked_shl(3), OP)
+}
+#[doc = "Prefetch bytes"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfb_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfb::<OP, _>(pg, base.offset(svcntb() as isize * vnum as isize))
+}
+#[doc = "Prefetch halfwords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfh_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfh::<OP, _>(pg, base.offset(svcnth() as isize * vnum as isize))
+}
+#[doc = "Prefetch words"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfw_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfw::<OP, _>(pg, base.offset(svcntw() as isize * vnum as isize))
+}
+#[doc = "Prefetch doublewords"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))]
+pub unsafe fn svprfd_vnum<const OP: svprfop, T>(pg: svbool_t, base: *const T, vnum: i64) {
+    svprfd::<OP, _>(pg, base.offset(svcntd() as isize * vnum as isize))
+}
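+// Illustrative sketch (helper name hypothetical): `vnum` is measured in whole
+// vectors, so passing `1` prefetches the data one vector length past `base`,
+// which is a common pattern when streaming through a buffer.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_prefetch_next_vector(pg: svbool_t, base: *const u8) {
+    // Prefetch the block that the next loop iteration will load.
+    svprfb_vnum::<{ svprfop::SV_PLDL1KEEP }, u8>(pg, base, 1);
+}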
+#[doc = "Test whether any active element is true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptest))]
+pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ptest.any.nxv16i1"
+        )]
+        fn _svptest_any(pg: svbool_t, op: svbool_t) -> bool;
+    }
+    unsafe { _svptest_any(pg, op) }
+}
+#[doc = "Test whether first active element is true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptest))]
+pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ptest.first.nxv16i1"
+        )]
+        fn _svptest_first(pg: svbool_t, op: svbool_t) -> bool;
+    }
+    unsafe { _svptest_first(pg, op) }
+}
+#[doc = "Test whether last active element is true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptest))]
+pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ptest.last.nxv16i1"
+        )]
+        fn _svptest_last(pg: svbool_t, op: svbool_t) -> bool;
+    }
+    unsafe { _svptest_last(pg, op) }
+}
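+// Illustrative sketch (helper name hypothetical): the `svptest_*` intrinsics
+// reduce a predicate to a scalar `bool`. Testing the last element against an
+// all-true governing predicate tells whether `pg` covers a full vector, which
+// distinguishes a full loop iteration from the final partial one.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn example_predicate_is_full(pg: svbool_t) -> bool {
+    svptest_last(svptrue_b8(), pg)
+}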
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptrue))]
+pub fn svptrue_b8() -> svbool_t {
+    svptrue_pat_b8::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptrue))]
+pub fn svptrue_b16() -> svbool_t {
+    svptrue_pat_b16::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptrue))]
+pub fn svptrue_b32() -> svbool_t {
+    svptrue_pat_b32::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ptrue))]
+pub fn svptrue_b64() -> svbool_t {
+    svptrue_pat_b64::<{ svpattern::SV_ALL }>()
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
+pub fn svptrue_pat_b8<const PATTERN: svpattern>() -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv16i1")]
+        fn _svptrue_pat_b8(pattern: svpattern) -> svbool_t;
+    }
+    unsafe { _svptrue_pat_b8(PATTERN) }
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
+pub fn svptrue_pat_b16<const PATTERN: svpattern>() -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv8i1")]
+        fn _svptrue_pat_b16(pattern: svpattern) -> svbool8_t;
+    }
+    unsafe { _svptrue_pat_b16(PATTERN).into() }
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
+pub fn svptrue_pat_b32<const PATTERN: svpattern>() -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv4i1")]
+        fn _svptrue_pat_b32(pattern: svpattern) -> svbool4_t;
+    }
+    unsafe { _svptrue_pat_b32(PATTERN).into() }
+}
+#[doc = "Set predicate elements to true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+# [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))]
+pub fn svptrue_pat_b64<const PATTERN: svpattern>() -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ptrue.nxv2i1")]
+        fn _svptrue_pat_b64(pattern: svpattern) -> svbool2_t;
+    }
+    unsafe { _svptrue_pat_b64(PATTERN).into() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqadd.x.nxv16i8"
+        )]
+        fn _svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqadd_s8(op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
+    svqadd_s8(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqadd.x.nxv8i16"
+        )]
+        fn _svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqadd_s16(op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
+    svqadd_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqadd.x.nxv4i32"
+        )]
+        fn _svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqadd_s32(op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
+    svqadd_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqadd.x.nxv2i64"
+        )]
+        fn _svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqadd_s64(op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
+    svqadd_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqadd.x.nxv16i8"
+        )]
+        fn _svqadd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqadd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqadd_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqadd.x.nxv8i16"
+        )]
+        fn _svqadd_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqadd_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqadd_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqadd.x.nxv4i32"
+        )]
+        fn _svqadd_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqadd_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqadd_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqadd.x.nxv2i64"
+        )]
+        fn _svqadd_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqadd_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqadd_u64(op1, svdup_n_u64(op2))
+}
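+// Illustrative sketch (helper name hypothetical): unlike wrapping vector
+// addition, `svqadd_*` clamps at the numeric limits, so adding 10 to lanes
+// holding 250 yields 255 in every lane rather than wrapping around to 4.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn example_saturating_add() -> svuint8_t {
+    svqadd_n_u8(svdup_n_u8(250), 10)
+}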
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))]
+pub fn svqdecb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqdecb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
+pub fn svqdech_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqdech_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqdecw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqdecd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))]
+pub fn svqdecb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqdecb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
+pub fn svqdech_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqdech_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqdecw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqdecd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))]
+pub fn svqdecb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqdecb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
+pub fn svqdech_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqdech_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqdecw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqdecd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))]
+pub fn svqdecb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqdecb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
+pub fn svqdech_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqdech_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqdecw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqdecd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
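+// Illustrative sketch (an assumption, not generated code): the scalar
+// `svqdec*_n_*` forms are typically used to step a remaining-length counter by
+// one vector's worth of elements without risking wrap-around; `buffer_len` and
+// the loop shape below are hypothetical.
+//
+//     let mut remaining: i64 = buffer_len;
+//     while remaining > 0 {
+//         // ... process one vector of byte elements ...
+//         remaining = svqdecb_n_s64::<1>(remaining); // saturates instead of wrapping
+//     }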
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n32")]
+        fn _svqdecb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecb_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n32")]
+        fn _svqdech_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdech_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n32")]
+        fn _svqdecw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecw_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n32")]
+        fn _svqdecd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecd_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecb.n64")]
+        fn _svqdecb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecb_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.n64")]
+        fn _svqdech_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdech_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.n64")]
+        fn _svqdecw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecw_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.n64")]
+        fn _svqdecd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecd_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n32")]
+        fn _svqdecb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n32")]
+        fn _svqdech_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdech_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n32")]
+        fn _svqdecw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n32")]
+        fn _svqdecd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqdecd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecb.n64")]
+        fn _svqdecb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.n64")]
+        fn _svqdech_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdech_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.n64")]
+        fn _svqdecw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.n64")]
+        fn _svqdecd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqdecd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
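+// Illustrative sketch (assumption): the `_pat` variants expose the counting
+// pattern as a const generic instead of hard-coding SV_ALL, e.g. counting only
+// the largest power-of-two number of elements; `svpattern::SV_POW2` is assumed
+// to be one of the generated pattern values.
+//
+//     // Decrement by twice the largest power-of-two count of word elements.
+//     let n = svqdecw_pat_n_s32::<{ svpattern::SV_POW2 }, 2>(n);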
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdech.nxv8i16")]
+        fn _svqdech_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
+    }
+    unsafe { _svqdech_pat_s16(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecw.nxv4i32")]
+        fn _svqdecw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
+    }
+    unsafe { _svqdecw_pat_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecd.nxv2i64")]
+        fn _svqdecd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
+    }
+    unsafe { _svqdecd_pat_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdech_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdech.nxv8i16")]
+        fn _svqdech_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
+    }
+    unsafe { _svqdech_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecw.nxv4i32")]
+        fn _svqdecw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
+    }
+    unsafe { _svqdecw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqdecd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint64_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecd.nxv2i64")]
+        fn _svqdecd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
+    }
+    unsafe { _svqdecd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))]
+pub fn svqdech_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t {
+    svqdech_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t {
+    svqdecw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t {
+    svqdecd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))]
+pub fn svqdech_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t {
+    svqdech_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))]
+pub fn svqdecw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t {
+    svqdecw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating decrement by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))]
+pub fn svqdecd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t {
+    svqdecd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
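+// Illustrative sketch (assumption): the vector forms decrement every lane of
+// the operand by the selected element count, saturating per lane; `v` is a
+// hypothetical svint16_t.
+//
+//     // Each lane of `v` drops by the halfword element count of a vector.
+//     let v = svqdech_s16::<1>(v);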
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv16i1"
+        )]
+        fn _svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32;
+    }
+    unsafe { _svqdecp_n_s32_b8(op, pg) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv8i1"
+        )]
+        fn _svqdecp_n_s32_b16(op: i32, pg: svbool8_t) -> i32;
+    }
+    unsafe { _svqdecp_n_s32_b16(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv4i1"
+        )]
+        fn _svqdecp_n_s32_b32(op: i32, pg: svbool4_t) -> i32;
+    }
+    unsafe { _svqdecp_n_s32_b32(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n32.nxv2i1"
+        )]
+        fn _svqdecp_n_s32_b64(op: i32, pg: svbool2_t) -> i32;
+    }
+    unsafe { _svqdecp_n_s32_b64(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv16i1"
+        )]
+        fn _svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64;
+    }
+    unsafe { _svqdecp_n_s64_b8(op, pg) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv8i1"
+        )]
+        fn _svqdecp_n_s64_b16(op: i64, pg: svbool8_t) -> i64;
+    }
+    unsafe { _svqdecp_n_s64_b16(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv4i1"
+        )]
+        fn _svqdecp_n_s64_b32(op: i64, pg: svbool4_t) -> i64;
+    }
+    unsafe { _svqdecp_n_s64_b32(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdecp.n64.nxv2i1"
+        )]
+        fn _svqdecp_n_s64_b64(op: i64, pg: svbool2_t) -> i64;
+    }
+    unsafe { _svqdecp_n_s64_b64(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv16i1"
+        )]
+        fn _svqdecp_n_u32_b8(op: i32, pg: svbool_t) -> i32;
+    }
+    unsafe { _svqdecp_n_u32_b8(op.as_signed(), pg).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv8i1"
+        )]
+        fn _svqdecp_n_u32_b16(op: i32, pg: svbool8_t) -> i32;
+    }
+    unsafe { _svqdecp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv4i1"
+        )]
+        fn _svqdecp_n_u32_b32(op: i32, pg: svbool4_t) -> i32;
+    }
+    unsafe { _svqdecp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n32.nxv2i1"
+        )]
+        fn _svqdecp_n_u32_b64(op: i32, pg: svbool2_t) -> i32;
+    }
+    unsafe { _svqdecp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv16i1"
+        )]
+        fn _svqdecp_n_u64_b8(op: i64, pg: svbool_t) -> i64;
+    }
+    unsafe { _svqdecp_n_u64_b8(op.as_signed(), pg).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv8i1"
+        )]
+        fn _svqdecp_n_u64_b16(op: i64, pg: svbool8_t) -> i64;
+    }
+    unsafe { _svqdecp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv4i1"
+        )]
+        fn _svqdecp_n_u64_b32(op: i64, pg: svbool4_t) -> i64;
+    }
+    unsafe { _svqdecp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqdecp.n64.nxv2i1"
+        )]
+        fn _svqdecp_n_u64_b64(op: i64, pg: svbool2_t) -> i64;
+    }
+    unsafe { _svqdecp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() }
+}
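+// Illustrative sketch (assumption): `svqdecp_n_*` subtracts the number of
+// *active* predicate elements rather than a fixed pattern count, which suits a
+// partially predicated final loop iteration; `svwhilelt_b8_s32` is assumed to
+// be among the generated while-predicates.
+//
+//     let pg = svwhilelt_b8_s32(i, len);           // active lanes this iteration
+//     remaining = svqdecp_n_s32_b8(remaining, pg); // subtract only the active count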
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv8i16")]
+        fn _svqdecp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t;
+    }
+    unsafe { _svqdecp_s16(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv4i32")]
+        fn _svqdecp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t;
+    }
+    unsafe { _svqdecp_s32(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqdecp))]
+pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqdecp.nxv2i64")]
+        fn _svqdecp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t;
+    }
+    unsafe { _svqdecp_s64(op, pg.into()) }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv8i16")]
+        fn _svqdecp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t;
+    }
+    unsafe { _svqdecp_u16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv4i32")]
+        fn _svqdecp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t;
+    }
+    unsafe { _svqdecp_u32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating decrement by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqdecp))]
+pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqdecp.nxv2i64")]
+        fn _svqdecp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t;
+    }
+    unsafe { _svqdecp_u64(op.as_signed(), pg.into()).as_unsigned() }
+}
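+// Illustrative sketch (assumption): the vector `svqdecp_*` forms decrement
+// every lane by the active element count of `pg`, saturating per lane.
+//
+//     let acc = svqdecp_s16(acc, pg); // all lanes drop by the active 16-bit lane count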
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))]
+pub fn svqincb_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqincb_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
+pub fn svqinch_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqinch_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
+pub fn svqincw_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqincw_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
+pub fn svqincd_n_s32<const IMM_FACTOR: i32>(op: i32) -> i32 {
+    svqincd_pat_n_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))]
+pub fn svqincb_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqincb_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
+pub fn svqinch_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqinch_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
+pub fn svqincw_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqincw_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
+pub fn svqincd_n_s64<const IMM_FACTOR: i32>(op: i64) -> i64 {
+    svqincd_pat_n_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))]
+pub fn svqincb_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqincb_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
+pub fn svqinch_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqinch_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
+pub fn svqincw_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqincw_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
+pub fn svqincd_n_u32<const IMM_FACTOR: i32>(op: u32) -> u32 {
+    svqincd_pat_n_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))]
+pub fn svqincb_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqincb_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
+pub fn svqinch_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqinch_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
+pub fn svqincw_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqincw_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
+pub fn svqincd_n_u64<const IMM_FACTOR: i32>(op: u64) -> u64 {
+    svqincd_pat_n_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
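+// Illustrative sketch (assumption): the `svqinc*` family mirrors `svqdec*` but
+// saturating-increments, e.g. advancing an index without overflow; `idx` is a
+// hypothetical u32 offset.
+//
+//     // Advance by one vector of word elements, clamping at u32::MAX.
+//     idx = svqincw_n_u32::<1>(idx);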
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n32")]
+        fn _svqincb_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincb_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n32")]
+        fn _svqinch_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqinch_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n32")]
+        fn _svqincw_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincw_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i32) -> i32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n32")]
+        fn _svqincd_pat_n_s32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincd_pat_n_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincb.n64")]
+        fn _svqincb_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincb_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.n64")]
+        fn _svqinch_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqinch_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.n64")]
+        fn _svqincw_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincw_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: i64) -> i64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.n64")]
+        fn _svqincd_pat_n_s64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincd_pat_n_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n32")]
+        fn _svqincb_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincb_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n32")]
+        fn _svqinch_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqinch_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n32")]
+        fn _svqincw_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincw_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u32) -> u32 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n32")]
+        fn _svqincd_pat_n_u32(op: i32, pattern: svpattern, imm_factor: i32) -> i32;
+    }
+    unsafe { _svqincd_pat_n_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of byte elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincb, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincb_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincb.n64")]
+        fn _svqincb_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincb_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.n64")]
+        fn _svqinch_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqinch_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.n64")]
+        fn _svqincw_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincw_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_n_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(op: u64) -> u64 {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.n64")]
+        fn _svqincd_pat_n_u64(op: i64, pattern: svpattern, imm_factor: i32) -> i64;
+    }
+    unsafe { _svqincd_pat_n_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
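+// Illustrative sketch (assumption): as with the decrements, the `_pat`
+// increment forms expose the counting pattern; the plain forms above are the
+// SV_ALL case. `svpattern::SV_VL4` is assumed to be one of the generated
+// pattern values (it counts four elements when the vector length allows it).
+//
+//     let x = svqincd_pat_n_s64::<{ svpattern::SV_VL4 }, 1>(x);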
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_s16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqinch.nxv8i16")]
+        fn _svqinch_pat_s16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
+    }
+    unsafe { _svqinch_pat_s16(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_s32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincw.nxv4i32")]
+        fn _svqincw_pat_s32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
+    }
+    unsafe { _svqincw_pat_s32(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_s64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincd.nxv2i64")]
+        fn _svqincd_pat_s64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
+    }
+    unsafe { _svqincd_pat_s64(op, PATTERN, IMM_FACTOR) }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqinch_pat_u16<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqinch.nxv8i16")]
+        fn _svqinch_pat_u16(op: svint16_t, pattern: svpattern, imm_factor: i32) -> svint16_t;
+    }
+    unsafe { _svqinch_pat_u16(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincw_pat_u32<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincw.nxv4i32")]
+        fn _svqincw_pat_u32(op: svint32_t, pattern: svpattern, imm_factor: i32) -> svint32_t;
+    }
+    unsafe { _svqincw_pat_u32(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, PATTERN = { svpattern::SV_ALL }, IMM_FACTOR = 1))]
+pub fn svqincd_pat_u64<const PATTERN: svpattern, const IMM_FACTOR: i32>(
+    op: svuint64_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_FACTOR, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincd.nxv2i64")]
+        fn _svqincd_pat_u64(op: svint64_t, pattern: svpattern, imm_factor: i32) -> svint64_t;
+    }
+    unsafe { _svqincd_pat_u64(op.as_signed(), PATTERN, IMM_FACTOR).as_unsigned() }
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))]
+pub fn svqinch_s16<const IMM_FACTOR: i32>(op: svint16_t) -> svint16_t {
+    svqinch_pat_s16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))]
+pub fn svqincw_s32<const IMM_FACTOR: i32>(op: svint32_t) -> svint32_t {
+    svqincw_pat_s32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))]
+pub fn svqincd_s64<const IMM_FACTOR: i32>(op: svint64_t) -> svint64_t {
+    svqincd_pat_s64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of halfword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))]
+pub fn svqinch_u16<const IMM_FACTOR: i32>(op: svuint16_t) -> svuint16_t {
+    svqinch_pat_u16::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of word elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))]
+pub fn svqincw_u32<const IMM_FACTOR: i32>(op: svuint32_t) -> svuint32_t {
+    svqincw_pat_u32::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
+#[doc = "Saturating increment by number of doubleword elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))]
+pub fn svqincd_u64<const IMM_FACTOR: i32>(op: svuint64_t) -> svuint64_t {
+    svqincd_pat_u64::<{ svpattern::SV_ALL }, IMM_FACTOR>(op)
+}
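+// Illustrative note: the pattern-less svqinc{h,w,d} wrappers above simply fix
+// PATTERN to SV_ALL, so a hypothetical
+//
+//     let r = svqinch_s16::<4>(x);
+//
+// behaves exactly like svqinch_pat_s16::<{ svpattern::SV_ALL }, 4>(x), with
+// the factor constrained to the asserted 1..=16 range.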
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n32.nxv16i1"
+        )]
+        fn _svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32;
+    }
+    unsafe { _svqincp_n_s32_b8(op, pg) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n32.nxv8i1"
+        )]
+        fn _svqincp_n_s32_b16(op: i32, pg: svbool8_t) -> i32;
+    }
+    unsafe { _svqincp_n_s32_b16(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n32.nxv4i1"
+        )]
+        fn _svqincp_n_s32_b32(op: i32, pg: svbool4_t) -> i32;
+    }
+    unsafe { _svqincp_n_s32_b32(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n32.nxv2i1"
+        )]
+        fn _svqincp_n_s32_b64(op: i32, pg: svbool2_t) -> i32;
+    }
+    unsafe { _svqincp_n_s32_b64(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n64.nxv16i1"
+        )]
+        fn _svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64;
+    }
+    unsafe { _svqincp_n_s64_b8(op, pg) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n64.nxv8i1"
+        )]
+        fn _svqincp_n_s64_b16(op: i64, pg: svbool8_t) -> i64;
+    }
+    unsafe { _svqincp_n_s64_b16(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n64.nxv4i1"
+        )]
+        fn _svqincp_n_s64_b32(op: i64, pg: svbool4_t) -> i64;
+    }
+    unsafe { _svqincp_n_s64_b32(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqincp.n64.nxv2i1"
+        )]
+        fn _svqincp_n_s64_b64(op: i64, pg: svbool2_t) -> i64;
+    }
+    unsafe { _svqincp_n_s64_b64(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n32.nxv16i1"
+        )]
+        fn _svqincp_n_u32_b8(op: i32, pg: svbool_t) -> i32;
+    }
+    unsafe { _svqincp_n_u32_b8(op.as_signed(), pg).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n32.nxv8i1"
+        )]
+        fn _svqincp_n_u32_b16(op: i32, pg: svbool8_t) -> i32;
+    }
+    unsafe { _svqincp_n_u32_b16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n32.nxv4i1"
+        )]
+        fn _svqincp_n_u32_b32(op: i32, pg: svbool4_t) -> i32;
+    }
+    unsafe { _svqincp_n_u32_b32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n32.nxv2i1"
+        )]
+        fn _svqincp_n_u32_b64(op: i32, pg: svbool2_t) -> i32;
+    }
+    unsafe { _svqincp_n_u32_b64(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n64.nxv16i1"
+        )]
+        fn _svqincp_n_u64_b8(op: i64, pg: svbool_t) -> i64;
+    }
+    unsafe { _svqincp_n_u64_b8(op.as_signed(), pg).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n64.nxv8i1"
+        )]
+        fn _svqincp_n_u64_b16(op: i64, pg: svbool8_t) -> i64;
+    }
+    unsafe { _svqincp_n_u64_b16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n64.nxv4i1"
+        )]
+        fn _svqincp_n_u64_b32(op: i64, pg: svbool4_t) -> i64;
+    }
+    unsafe { _svqincp_n_u64_b32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqincp.n64.nxv2i1"
+        )]
+        fn _svqincp_n_u64_b64(op: i64, pg: svbool2_t) -> i64;
+    }
+    unsafe { _svqincp_n_u64_b64(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv8i16")]
+        fn _svqincp_s16(op: svint16_t, pg: svbool8_t) -> svint16_t;
+    }
+    unsafe { _svqincp_s16(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv4i32")]
+        fn _svqincp_s32(op: svint32_t, pg: svbool4_t) -> svint32_t;
+    }
+    unsafe { _svqincp_s32(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqincp))]
+pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqincp.nxv2i64")]
+        fn _svqincp_s64(op: svint64_t, pg: svbool2_t) -> svint64_t;
+    }
+    unsafe { _svqincp_s64(op, pg.into()) }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv8i16")]
+        fn _svqincp_u16(op: svint16_t, pg: svbool8_t) -> svint16_t;
+    }
+    unsafe { _svqincp_u16(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv4i32")]
+        fn _svqincp_u32(op: svint32_t, pg: svbool4_t) -> svint32_t;
+    }
+    unsafe { _svqincp_u32(op.as_signed(), pg.into()).as_unsigned() }
+}
+#[doc = "Saturating increment by active element count"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqincp))]
+pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqincp.nxv2i64")]
+        fn _svqincp_u64(op: svint64_t, pg: svbool2_t) -> svint64_t;
+    }
+    unsafe { _svqincp_u64(op.as_signed(), pg.into()).as_unsigned() }
+}
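+// Illustrative note: the svqincp bindings above follow two conventions used
+// throughout this file. The governing predicate is accepted as `svbool_t` and
+// converted with `.into()` to the element-width-specific predicate type
+// (svbool8_t, svbool4_t, svbool2_t) that the LLVM intrinsic expects, and the
+// unsigned forms reuse the signed LLVM declarations via `as_signed()` /
+// `as_unsigned()` bit reinterpretation. A hypothetical use:
+//
+//     let total = svqincp_n_u64_b32(total, pg); // add the active 32-bit lane
+//                                               // count, saturating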
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqsub.x.nxv16i8"
+        )]
+        fn _svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_s8(op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqsub.x.nxv8i16"
+        )]
+        fn _svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsub_s16(op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
+    svqsub_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqsub.x.nxv4i32"
+        )]
+        fn _svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsub_s32(op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
+    svqsub_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqsub.x.nxv2i64"
+        )]
+        fn _svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsub_s64(op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
+    svqsub_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqsub.x.nxv16i8"
+        )]
+        fn _svqsub_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsub_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqsub.x.nxv8i16"
+        )]
+        fn _svqsub_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsub_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsub_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqsub.x.nxv4i32"
+        )]
+        fn _svqsub_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsub_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsub_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqsub.x.nxv2i64"
+        )]
+        fn _svqsub_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsub_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsub_u64(op1, svdup_n_u64(op2))
+}
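+// Illustrative note: each svqsub_n_* wrapper above splats the scalar with the
+// matching svdup_n_* and defers to the vector-vector form, so a hypothetical
+//
+//     let r = svqsub_n_u8(v, 10);
+//
+// is equivalent to svqsub_u8(v, svdup_n_u8(10)) and clamps any lane of `v`
+// smaller than 10 to zero.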
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv16i8")]
+        fn _svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrbit_s8_m(inactive, pg, op) }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svrbit_s8_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svrbit_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv8i16")]
+        fn _svrbit_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrbit_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svrbit_s16_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svrbit_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv4i32")]
+        fn _svrbit_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrbit_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrbit_s32_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrbit_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rbit.nxv2i64")]
+        fn _svrbit_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrbit_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrbit_s64_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrbit_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    unsafe { svrbit_s8_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svrbit_u8_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t {
+    svrbit_u8_m(svdup_n_u8(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svrbit_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svrbit_u16_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svrbit_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svrbit_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrbit_u32_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrbit_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svrbit_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrbit_u64_m(op, pg, op)
+}
+#[doc = "Reverse bits"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rbit))]
+pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrbit_u64_m(svdup_n_u64(0), pg, op)
+}
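+// Illustrative note: the _m/_x/_z suffixes above encode the SVE predication
+// convention as implemented here: _m merges inactive lanes from the explicit
+// `inactive` argument, _x passes `op` itself as the merge source (inactive
+// lanes are a don't-care), and _z zeroes inactive lanes by merging with
+// svdup_n_*(0).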
+#[doc = "Read FFR, returning predicate of succesfully loaded elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdffr))]
+pub fn svrdffr() -> svbool_t {
+    svrdffr_z(svptrue_b8())
+}
+#[doc = "Read FFR, returning predicate of succesfully loaded elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rdffr))]
+pub fn svrdffr_z(pg: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rdffr.z")]
+        fn _svrdffr_z(pg: svbool_t) -> svbool_t;
+    }
+    unsafe { _svrdffr_z(pg) }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpe))]
+pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpe.x.nxv4f32"
+        )]
+        fn _svrecpe_f32(op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrecpe_f32(op) }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpe))]
+pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpe.x.nxv2f64"
+        )]
+        fn _svrecpe_f64(op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrecpe_f64(op) }
+}
+#[doc = "Reciprocal step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecps))]
+pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecps.x.nxv4f32"
+        )]
+        fn _svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrecps_f32(op1, op2) }
+}
+#[doc = "Reciprocal step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecps))]
+pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecps.x.nxv2f64"
+        )]
+        fn _svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrecps_f64(op1, op2) }
+}
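+// Illustrative note: svrecpe_* and svrecps_* are typically paired for a
+// Newton-Raphson refinement of 1/x. A hypothetical single refinement step,
+// assuming the svmul_f32_x binding generated elsewhere in this file:
+//
+//     let mut est = svrecpe_f32(x);
+//     est = svmul_f32_x(pg, est, svrecps_f32(x, est));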
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpx.x.nxv4f32"
+        )]
+        fn _svrecpx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrecpx_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrecpx_f32_m(op, pg, op)
+}
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrecpx_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frecpx.x.nxv2f64"
+        )]
+        fn _svrecpx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrecpx_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrecpx_f64_m(op, pg, op)
+}
+#[doc = "Reciprocal exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frecpx))]
+pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrecpx_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t {
+    unsafe { simd_reinterpret(op) }
+}
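+// Illustrative note: every svreinterpret_* binding in this block is a pure
+// bit-level reinterpretation via `simd_reinterpret`; no value conversion is
+// performed, so a hypothetical round trip such as
+//
+//     let y = svreinterpret_f32_s32(svreinterpret_s32_f32(x));
+//
+// returns `x` bit-for-bit.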
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
+#[doc = "Reinterpret vector contents"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t {
+    unsafe { simd_reinterpret(op) }
+}
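+// Illustrative usage sketch (hand-written; not generated from arm_intrinsics.json):
+// every `svreinterpret_*` above lowers to the bit-preserving `simd_reinterpret`,
+// so a converse pair of reinterprets returns the original value unchanged.
+// Assumes an SVE-enabled target and uses only functions defined in this module:
+//
+//     #[target_feature(enable = "sve")]
+//     fn reinterpret_roundtrip(x: svint32_t) -> svint32_t {
+//         svreinterpret_s32_u32(svreinterpret_u32_s32(x))
+//     }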
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_b8(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")]
+        fn _svrev_b8(op: svbool_t) -> svbool_t;
+    }
+    unsafe { _svrev_b8(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_b16(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i1")]
+        fn _svrev_b16(op: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svrev_b16(op.into()).into() }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_b32(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")]
+        fn _svrev_b32(op: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svrev_b32(op.into()).into() }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_b64(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")]
+        fn _svrev_b64(op: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svrev_b64(op.into()).into() }
+}
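+// Note (hand-written; not generated): the `svrev_b16`/`svrev_b32`/`svrev_b64`
+// wrappers above take the all-lane `svbool_t` predicate and convert it with
+// `.into()` to the narrower `svbool8_t`/`svbool4_t`/`svbool2_t` views expected
+// by the `nxv8i1`/`nxv4i1`/`nxv2i1` LLVM intrinsics, converting the result back
+// the same way.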
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")]
+        fn _svrev_f32(op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrev_f32(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")]
+        fn _svrev_f64(op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrev_f64(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_s8(op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")]
+        fn _svrev_s8(op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrev_s8(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_s16(op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")]
+        fn _svrev_s16(op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrev_s16(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_s32(op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")]
+        fn _svrev_s32(op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrev_s32(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_s64(op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")]
+        fn _svrev_s64(op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrev_s64(op) }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_u8(op: svuint8_t) -> svuint8_t {
+    unsafe { svrev_s8(op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_u16(op: svuint16_t) -> svuint16_t {
+    unsafe { svrev_s16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_u32(op: svuint32_t) -> svuint32_t {
+    unsafe { svrev_s32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse all elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(rev))]
+pub fn svrev_u64(op: svuint64_t) -> svuint64_t {
+    unsafe { svrev_s64(op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv8i16")]
+        fn _svrevb_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrevb_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svrevb_s16_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svrevb_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv4i32")]
+        fn _svrevb_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrevb_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrevb_s32_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrevb_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revb.nxv2i64")]
+        fn _svrevb_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrevb_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevb_s64_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevb_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    unsafe { svrevb_s16_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svrevb_u16_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t {
+    svrevb_u16_m(svdup_n_u16(0), pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svrevb_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrevb_u32_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrevb_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svrevb_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevb_u64_m(op, pg, op)
+}
+#[doc = "Reverse bytes within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revb))]
+pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevb_u64_m(svdup_n_u64(0), pg, op)
+}
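+// Note (hand-written; not generated): the `_m`/`_x`/`_z` suffixes follow the
+// ACLE predication convention. `_m` merges inactive lanes from the `inactive`
+// argument, `_x` treats inactive lanes as don't-care (implemented above by
+// passing `op` as the merge source), and `_z` zeroes them by passing
+// `svdup_n_*(0)`.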
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv4i32")]
+        fn _svrevh_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrevh_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrevh_s32_m(op, pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svrevh_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revh.nxv2i64")]
+        fn _svrevh_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrevh_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevh_s64_m(op, pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevh_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    unsafe { svrevh_s32_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrevh_u32_m(op, pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrevh_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svrevh_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevh_u64_m(op, pg, op)
+}
+#[doc = "Reverse halfwords within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revh))]
+pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevh_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.revw.nxv2i64")]
+        fn _svrevw_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrevw_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevw_s64_m(op, pg, op)
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svrevw_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    unsafe { svrevw_s64_m(inactive.as_signed(), pg, op.as_signed()).as_unsigned() }
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevw_u64_m(op, pg, op)
+}
+#[doc = "Reverse words within elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(revw))]
+pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t {
+    svrevw_u64_m(svdup_n_u64(0), pg, op)
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv4f32")]
+        fn _svrinta_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrinta_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrinta_f32_m(op, pg, op)
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrinta_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinta.nxv2f64")]
+        fn _svrinta_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrinta_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrinta_f64_m(op, pg, op)
+}
+#[doc = "Round to nearest, ties away from zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinta))]
+pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrinta_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv4f32")]
+        fn _svrinti_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrinti_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrinti_f32_m(op, pg, op)
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrinti_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frinti.nxv2f64")]
+        fn _svrinti_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrinti_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrinti_f64_m(op, pg, op)
+}
+#[doc = "Round using current rounding mode (inexact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frinti))]
+pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrinti_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv4f32")]
+        fn _svrintm_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrintm_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintm_f32_m(op, pg, op)
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintm_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintm.nxv2f64")]
+        fn _svrintm_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrintm_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintm_f64_m(op, pg, op)
+}
+#[doc = "Round towards -∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintm))]
+pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintm_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv4f32")]
+        fn _svrintn_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrintn_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintn_f32_m(op, pg, op)
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintn_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintn.nxv2f64")]
+        fn _svrintn_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrintn_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintn_f64_m(op, pg, op)
+}
+#[doc = "Round to nearest, ties to even"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintn))]
+pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintn_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv4f32")]
+        fn _svrintp_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrintp_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintp_f32_m(op, pg, op)
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintp_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintp.nxv2f64")]
+        fn _svrintp_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrintp_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintp_f64_m(op, pg, op)
+}
+#[doc = "Round towards +∞"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintp))]
+pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintp_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv4f32")]
+        fn _svrintx_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrintx_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintx_f32_m(op, pg, op)
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintx_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintx.nxv2f64")]
+        fn _svrintx_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrintx_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintx_f64_m(op, pg, op)
+}
+#[doc = "Round using current rounding mode (exact)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintx))]
+pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintx_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv4f32")]
+        fn _svrintz_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrintz_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintz_f32_m(op, pg, op)
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svrintz_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.frintz.nxv2f64")]
+        fn _svrintz_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrintz_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintz_f64_m(op, pg, op)
+}
+#[doc = "Round towards zero"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frintz))]
+pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svrintz_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Reciprocal square root estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frsqrte))]
+pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frsqrte.x.nxv4f32"
+        )]
+        fn _svrsqrte_f32(op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrsqrte_f32(op) }
+}
+#[doc = "Reciprocal square root estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frsqrte))]
+pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frsqrte.x.nxv2f64"
+        )]
+        fn _svrsqrte_f64(op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrsqrte_f64(op) }
+}
+#[doc = "Reciprocal square root step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frsqrts))]
+pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frsqrts.x.nxv4f32"
+        )]
+        fn _svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svrsqrts_f32(op1, op2) }
+}
+#[doc = "Reciprocal square root step"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(frsqrts))]
+pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.frsqrts.x.nxv2f64"
+        )]
+        fn _svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svrsqrts_f64(op1, op2) }
+}
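+// `svrsqrte_*` produces a coarse estimate of 1/sqrt(x), and `svrsqrts_*` computes the
+// Newton-Raphson step value (3 - op1 * op2) / 2, so the two can be combined to refine
+// the estimate. A sketch of one refinement step, assuming the `svmul_*_x` intrinsics
+// defined elsewhere in this file:
+//     let mut e = svrsqrte_f32(x);
+//     e = svmul_f32_x(pg, e, svrsqrts_f32(svmul_f32_x(pg, x, e), e));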
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv4f32")]
+        fn _svscale_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svscale_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
+    svscale_f32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
+    svscale_f32_m(pg, op1, op2)
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
+    svscale_f32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat32_t {
+    svscale_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t {
+    svscale_f32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fscale.nxv2f64")]
+        fn _svscale_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svscale_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
+    svscale_f64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
+    svscale_f64_m(pg, op1, op2)
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
+    svscale_f64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat64_t {
+    svscale_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Adjust exponent"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fscale))]
+pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t {
+    svscale_f64_z(pg, op1, svdup_n_s64(op2))
+}
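+// `svscale` multiplies each active element of `op1` by 2^op2 (the FSCALE instruction);
+// the `_n` forms splat a scalar exponent with `svdup_n_s32`/`svdup_n_s64` before
+// delegating to the vector forms.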
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t {
+    unsafe { simd_select(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    unsafe { simd_select::<svbool4_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    unsafe { simd_select::<svbool2_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    unsafe { simd_select::<svbool_t, _>(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    unsafe { simd_select::<svbool8_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    unsafe { simd_select::<svbool4_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    unsafe { simd_select::<svbool2_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { simd_select::<svbool_t, _>(pg, op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { simd_select::<svbool8_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { simd_select::<svbool4_t, _>(pg.into(), op1, op2) }
+}
+#[doc = "Conditionally select elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sel))]
+pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { simd_select::<svbool2_t, _>(pg.into(), op1, op2) }
+}
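+// `svsel_*` lowers directly to `simd_select`; the governing predicate is first narrowed
+// from the byte-granular `svbool_t` to the element-granular predicate type (`svbool8_t`,
+// `svbool4_t` or `svbool2_t`) via `.into()`. These selects are also what the `_z`
+// wrappers above use to zero inactive lanes before a merging operation.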
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_f32<const IMM_INDEX: i32>(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv8f32.nxv4f32"
+        )]
+        fn _svset2_f32(tuple: svfloat32x2_t, imm_index: i32, x: svfloat32_t) -> svfloat32x2_t;
+    }
+    unsafe { _svset2_f32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_f64<const IMM_INDEX: i32>(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv4f64.nxv2f64"
+        )]
+        fn _svset2_f64(tuple: svfloat64x2_t, imm_index: i32, x: svfloat64_t) -> svfloat64x2_t;
+    }
+    unsafe { _svset2_f64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_s8<const IMM_INDEX: i32>(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv32i8.nxv16i8"
+        )]
+        fn _svset2_s8(tuple: svint8x2_t, imm_index: i32, x: svint8_t) -> svint8x2_t;
+    }
+    unsafe { _svset2_s8(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_s16<const IMM_INDEX: i32>(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv16i16.nxv8i16"
+        )]
+        fn _svset2_s16(tuple: svint16x2_t, imm_index: i32, x: svint16_t) -> svint16x2_t;
+    }
+    unsafe { _svset2_s16(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_s32<const IMM_INDEX: i32>(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv8i32.nxv4i32"
+        )]
+        fn _svset2_s32(tuple: svint32x2_t, imm_index: i32, x: svint32_t) -> svint32x2_t;
+    }
+    unsafe { _svset2_s32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_s64<const IMM_INDEX: i32>(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv4i64.nxv2i64"
+        )]
+        fn _svset2_s64(tuple: svint64x2_t, imm_index: i32, x: svint64_t) -> svint64x2_t;
+    }
+    unsafe { _svset2_s64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_u8<const IMM_INDEX: i32>(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svset2_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_u16<const IMM_INDEX: i32>(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svset2_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_u32<const IMM_INDEX: i32>(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svset2_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset2_u64<const IMM_INDEX: i32>(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svset2_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_f32<const IMM_INDEX: i32>(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv12f32.nxv4f32"
+        )]
+        fn _svset3_f32(tuple: svfloat32x3_t, imm_index: i32, x: svfloat32_t) -> svfloat32x3_t;
+    }
+    unsafe { _svset3_f32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_f64<const IMM_INDEX: i32>(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv6f64.nxv2f64"
+        )]
+        fn _svset3_f64(tuple: svfloat64x3_t, imm_index: i32, x: svfloat64_t) -> svfloat64x3_t;
+    }
+    unsafe { _svset3_f64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s8<const IMM_INDEX: i32>(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv48i8.nxv16i8"
+        )]
+        fn _svset3_s8(tuple: svint8x3_t, imm_index: i32, x: svint8_t) -> svint8x3_t;
+    }
+    unsafe { _svset3_s8(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s16<const IMM_INDEX: i32>(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv24i16.nxv8i16"
+        )]
+        fn _svset3_s16(tuple: svint16x3_t, imm_index: i32, x: svint16_t) -> svint16x3_t;
+    }
+    unsafe { _svset3_s16(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s32<const IMM_INDEX: i32>(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv12i32.nxv4i32"
+        )]
+        fn _svset3_s32(tuple: svint32x3_t, imm_index: i32, x: svint32_t) -> svint32x3_t;
+    }
+    unsafe { _svset3_s32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_s64<const IMM_INDEX: i32>(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv6i64.nxv2i64"
+        )]
+        fn _svset3_s64(tuple: svint64x3_t, imm_index: i32, x: svint64_t) -> svint64x3_t;
+    }
+    unsafe { _svset3_s64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u8<const IMM_INDEX: i32>(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u16<const IMM_INDEX: i32>(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u32<const IMM_INDEX: i32>(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset3_u64<const IMM_INDEX: i32>(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t {
+    static_assert_range!(IMM_INDEX, 0, 2);
+    unsafe { svset3_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_f32<const IMM_INDEX: i32>(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv16f32.nxv4f32"
+        )]
+        fn _svset4_f32(tuple: svfloat32x4_t, imm_index: i32, x: svfloat32_t) -> svfloat32x4_t;
+    }
+    unsafe { _svset4_f32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_f64<const IMM_INDEX: i32>(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv8f64.nxv2f64"
+        )]
+        fn _svset4_f64(tuple: svfloat64x4_t, imm_index: i32, x: svfloat64_t) -> svfloat64x4_t;
+    }
+    unsafe { _svset4_f64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_s8<const IMM_INDEX: i32>(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv64i8.nxv16i8"
+        )]
+        fn _svset4_s8(tuple: svint8x4_t, imm_index: i32, x: svint8_t) -> svint8x4_t;
+    }
+    unsafe { _svset4_s8(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_s16<const IMM_INDEX: i32>(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv32i16.nxv8i16"
+        )]
+        fn _svset4_s16(tuple: svint16x4_t, imm_index: i32, x: svint16_t) -> svint16x4_t;
+    }
+    unsafe { _svset4_s16(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_s32<const IMM_INDEX: i32>(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv16i32.nxv4i32"
+        )]
+        fn _svset4_s32(tuple: svint32x4_t, imm_index: i32, x: svint32_t) -> svint32x4_t;
+    }
+    unsafe { _svset4_s32(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_s64<const IMM_INDEX: i32>(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.tuple.set.nxv8i64.nxv2i64"
+        )]
+        fn _svset4_s64(tuple: svint64x4_t, imm_index: i32, x: svint64_t) -> svint64x4_t;
+    }
+    unsafe { _svset4_s64(tuple, IMM_INDEX, x) }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_u8<const IMM_INDEX: i32>(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svset4_s8::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_u16<const IMM_INDEX: i32>(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svset4_s16::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_u32<const IMM_INDEX: i32>(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svset4_s32::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
+#[doc = "Change one vector in a tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+pub fn svset4_u64<const IMM_INDEX: i32>(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svset4_s64::<IMM_INDEX>(tuple.as_signed(), x.as_signed()).as_unsigned() }
+}
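+// The `svsetN_*` family replaces one vector within a tuple. The target slot is a const
+// generic checked at compile time by `static_assert_range!`, e.g. `svset2_f32::<1>(tuple, x)`
+// replaces the second vector of a two-vector tuple. The unsigned variants reuse the
+// signed implementations through `as_signed`/`as_unsigned` reinterpretation casts.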
+#[doc = "Initialize the first-fault register to all-true"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(setffr))]
+pub fn svsetffr() {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.setffr")]
+        fn _svsetffr();
+    }
+    unsafe { _svsetffr() }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4f32")]
+        fn _svsplice_f32(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svsplice_f32(pg.into(), op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2f64")]
+        fn _svsplice_f64(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svsplice_f64(pg.into(), op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv16i8")]
+        fn _svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svsplice_s8(pg, op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv8i16")]
+        fn _svsplice_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svsplice_s16(pg.into(), op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv4i32")]
+        fn _svsplice_s32(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsplice_s32(pg.into(), op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.splice.nxv2i64")]
+        fn _svsplice_s64(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsplice_s64(pg.into(), op1, op2) }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svsplice_s8(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svsplice_s16(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svsplice_s32(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Splice two vectors under predicate control"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(splice))]
+pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svsplice_s64(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
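+// `svsplice_*` concatenates the active segment of `op1` (from its first to its last
+// active element) with the leading elements of `op2`. As with the selects above, only
+// the 8-bit form can pass `svbool_t` straight through; wider element types first
+// convert the predicate with `.into()`.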
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv4f32")]
+        fn _svsqrt_f32_m(inactive: svfloat32_t, pg: svbool4_t, op: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svsqrt_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svsqrt_f32_m(op, pg, op)
+}
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t {
+    svsqrt_f32_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsqrt.nxv2f64")]
+        fn _svsqrt_f64_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svsqrt_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svsqrt_f64_m(op, pg, op)
+}
+#[doc = "Square root"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsqrt))]
+pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t {
+    svsqrt_f64_m(svdup_n_f64(0.0), pg, op)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4f32")]
+        fn _svst1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
+    }
+    _svst1_f32(data, pg.into(), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2f64")]
+        fn _svst1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
+    }
+    _svst1_f64(data, pg.into(), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv16i8")]
+        fn _svst1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8);
+    }
+    _svst1_s8(data, pg, base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i16")]
+        fn _svst1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16);
+    }
+    _svst1_s16(data, pg.into(), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i32")]
+        fn _svst1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32);
+    }
+    _svst1_s32(data, pg.into(), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i64")]
+        fn _svst1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64);
+    }
+    _svst1_s64(data, pg.into(), base)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) {
+    svst1_s8(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) {
+    svst1_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) {
+    svst1_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) {
+    svst1_s64(pg, base.as_signed(), data.as_signed())
+}
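+// A minimal usage sketch for the contiguous stores, assuming `svcntw` and
+// `svwhilelt_b32_s32` from elsewhere in this module; the `while` predicate keeps every
+// active lane inside `buf`, which satisfies the safety constraints listed above:
+//     let mut buf = vec![0.0f32; len];
+//     let mut i = 0usize;
+//     while i < len {
+//         let pg = svwhilelt_b32_s32(i as i32, len as i32);
+//         unsafe { svst1_f32(pg, buf.as_mut_ptr().add(i), svdup_n_f32(1.0)) };
+//         i += svcntw() as usize;
+//     }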
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32index_f32(
+    pg: svbool_t,
+    base: *mut f32,
+    indices: svint32_t,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4f32"
+        )]
+        fn _svst1_scatter_s32index_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            base: *mut f32,
+            indices: svint32_t,
+        );
+    }
+    _svst1_scatter_s32index_f32(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32index_s32(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i32"
+        )]
+        fn _svst1_scatter_s32index_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            base: *mut i32,
+            indices: svint32_t,
+        );
+    }
+    _svst1_scatter_s32index_s32(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32index_u32(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svint32_t,
+    data: svuint32_t,
+) {
+    svst1_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64index_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    indices: svint64_t,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2f64"
+        )]
+        fn _svst1_scatter_s64index_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            base: *mut f64,
+            indices: svint64_t,
+        );
+    }
+    _svst1_scatter_s64index_f64(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i64"
+        )]
+        fn _svst1_scatter_s64index_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            base: *mut i64,
+            indices: svint64_t,
+        );
+    }
+    _svst1_scatter_s64index_s64(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svst1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32index_f32(
+    pg: svbool_t,
+    base: *mut f32,
+    indices: svuint32_t,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4f32"
+        )]
+        fn _svst1_scatter_u32index_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            base: *mut f32,
+            indices: svint32_t,
+        );
+    }
+    _svst1_scatter_u32index_f32(data, pg.into(), base, indices.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32index_s32(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i32"
+        )]
+        fn _svst1_scatter_u32index_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            base: *mut i32,
+            indices: svint32_t,
+        );
+    }
+    _svst1_scatter_u32index_s32(data, pg.into(), base, indices.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32index_u32(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svuint32_t,
+    data: svuint32_t,
+) {
+    svst1_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64index_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    indices: svuint64_t,
+    data: svfloat64_t,
+) {
+    svst1_scatter_s64index_f64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svst1_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
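+// Illustrative note (editorial sketch, not part of the generated bindings): the
+// unsigned and floating-point "index" variants above forward to a single signed
+// implementation per element width, reinterpreting lanes and pointers with the
+// `.as_signed()` helper assumed to be defined earlier in this module, so only one
+// LLVM binding per width is needed. Indices are element indices, meaning lane `i`
+// stores to `base + indices[i] * size_of::<element>()`. A hypothetical caller
+// (assuming `svptrue_b64` and `svindex_s64` from this same intrinsics set):
+//
+//     let pg = svptrue_b64();
+//     let indices = svindex_s64(0, 2);                  // 0, 2, 4, ...
+//     svst1_scatter_s64index_s64(pg, buf.as_mut_ptr(), indices, data);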
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32offset_f32(
+    pg: svbool_t,
+    base: *mut f32,
+    offsets: svint32_t,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4f32"
+        )]
+        fn _svst1_scatter_s32offset_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            base: *mut f32,
+            offsets: svint32_t,
+        );
+    }
+    _svst1_scatter_s32offset_f32(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32offset_s32(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i32"
+        )]
+        fn _svst1_scatter_s32offset_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            base: *mut i32,
+            offsets: svint32_t,
+        );
+    }
+    _svst1_scatter_s32offset_s32(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_s32offset_u32(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svint32_t,
+    data: svuint32_t,
+) {
+    svst1_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64offset_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    offsets: svint64_t,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.nxv2f64"
+        )]
+        fn _svst1_scatter_s64offset_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            base: *mut f64,
+            offsets: svint64_t,
+        );
+    }
+    _svst1_scatter_s64offset_f64(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i64"
+        )]
+        fn _svst1_scatter_s64offset_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            base: *mut i64,
+            offsets: svint64_t,
+        );
+    }
+    _svst1_scatter_s64offset_s64(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32offset_f32(
+    pg: svbool_t,
+    base: *mut f32,
+    offsets: svuint32_t,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4f32"
+        )]
+        fn _svst1_scatter_u32offset_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            base: *mut f32,
+            offsets: svint32_t,
+        );
+    }
+    _svst1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i32"
+        )]
+        fn _svst1_scatter_u32offset_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            base: *mut i32,
+            offsets: svint32_t,
+        );
+    }
+    _svst1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svst1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64offset_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    offsets: svuint64_t,
+    data: svfloat64_t,
+) {
+    svst1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svst1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
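+// Illustrative note (editorial sketch): unlike the "index" forms, the "offset"
+// forms above take byte offsets, so lane `i` stores to `base` plus `offsets[i]`
+// bytes with no element-size scaling. To scatter consecutive f64 lanes, the
+// offsets therefore step by 8 (assuming `svptrue_b64` and `svindex_s64` from this
+// same intrinsics set):
+//
+//     let pg = svptrue_b64();
+//     let byte_offsets = svindex_s64(0, 8);             // 0, 8, 16, ...
+//     svst1_scatter_s64offset_f64(pg, buf.as_mut_ptr(), byte_offsets, data);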
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) {
+    svst1_scatter_u32base_offset_f32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svst1_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svst1_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) {
+    svst1_scatter_u64base_offset_f64(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svst1_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svst1_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_index_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svfloat32_t,
+) {
+    svst1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svint32_t,
+) {
+    svst1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svuint32_t,
+) {
+    svst1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_index_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svfloat64_t,
+) {
+    svst1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svst1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svst1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data)
+}
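+// Illustrative note (editorial sketch): the "base + index" forms above convert the
+// scalar element index into a byte offset by shifting by log2(element size) (2 for
+// 32-bit lanes, 3 for 64-bit lanes) and then delegate to the matching
+// "base + offset" form. The shift amount is a small constant, but callers remain
+// responsible, per the Safety docs, for keeping every computed address valid.
+// For instance, with 64-bit lanes:
+//
+//     // stores each lane at bases[i] + (1 << 3) = bases[i] + 8 bytes
+//     svst1_scatter_u64base_index_s64(pg, bases, 1, data);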
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_offset_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4f32.nxv4i32"
+        )]
+        fn _svst1_scatter_u32base_offset_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svst1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i32.nxv4i32"
+        )]
+        fn _svst1_scatter_u32base_offset_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svst1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svst1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_offset_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2f64.nxv2i64"
+        )]
+        fn _svst1_scatter_u64base_offset_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svst1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i64.nxv2i64"
+        )]
+        fn _svst1_scatter_u64base_offset_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svst1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svst1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
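+// Illustrative note (editorial sketch): the "u32base"/"u64base" forms take a vector
+// of absolute addresses rather than a Rust pointer, which is why the docs flag the
+// missing provenance. Each wrapper reinterprets `bases` as signed lanes and passes
+// a single scalar byte offset to the LLVM `scalar.offset` scatter intrinsic. One way
+// a caller could build `bases` (assuming `svdup_n_u64` from this same intrinsics set):
+//
+//     let bases = svdup_n_u64(buf.as_mut_ptr() as u64); // every lane: same address
+//     svst1_scatter_u64base_offset_u64(pg, bases, 0, data);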
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) {
+    svst1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) {
+    svst1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) {
+    svst1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) {
+    svst1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) {
+    svst1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) {
+    svst1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) {
+    svst1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) {
+    svst1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) {
+    svst1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1d))]
+pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) {
+    svst1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
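+// Illustrative note (editorial sketch): the "vnum" forms above advance `base` by a
+// whole number of vectors, i.e. by `vnum * (elements per vector)`, where
+// `svcntb`/`svcnth`/`svcntw`/`svcntd` give the element count for 8/16/32/64-bit
+// lanes. Storing two consecutive vectors therefore looks like:
+//
+//     svst1_vnum_f32(pg, buf.as_mut_ptr(), 0, lo);      // buf[0 .. svcntw()]
+//     svst1_vnum_f32(pg, buf.as_mut_ptr(), 1, hi);      // buf[svcntw() .. 2 * svcntw()]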
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv8i8")]
+        fn _svst1b_s16(data: nxv8i8, pg: svbool8_t, ptr: *mut i8);
+    }
+    _svst1b_s16(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i8")]
+        fn _svst1b_s32(data: nxv4i8, pg: svbool4_t, ptr: *mut i8);
+    }
+    _svst1b_s32(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv4i16")]
+        fn _svst1h_s32(data: nxv4i16, pg: svbool4_t, ptr: *mut i16);
+    }
+    _svst1h_s32(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: svint64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i8")]
+        fn _svst1b_s64(data: nxv2i8, pg: svbool2_t, ptr: *mut i8);
+    }
+    _svst1b_s64(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i16")]
+        fn _svst1h_s64(data: nxv2i16, pg: svbool2_t, ptr: *mut i16);
+    }
+    _svst1h_s64(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st1.nxv2i32")]
+        fn _svst1w_s64(data: nxv2i32, pg: svbool2_t, ptr: *mut i32);
+    }
+    _svst1w_s64(simd_cast(data), pg.into(), base)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) {
+    svst1b_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) {
+    svst1b_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) {
+    svst1h_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) {
+    svst1b_s64(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) {
+    svst1h_s64(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) {
+    svst1w_s64(pg, base.as_signed(), data.as_signed())
+}
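+// Illustrative note (editorial sketch): the truncating stores above narrow each lane
+// with `simd_cast` to a scalable vector of the target width (e.g. `nxv4i16` for
+// `svst1h_s32`) and then reuse the plain `st1` LLVM intrinsic for that narrower
+// element type, so only the low bits of each lane reach memory:
+//
+//     // writes the low 16 bits of each i32 lane to consecutive i16 slots
+//     svst1h_s32(pg, out.as_mut_ptr(), data);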
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_s32offset_s32(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8"
+        )]
+        fn _svst1b_scatter_s32offset_s32(
+            data: nxv4i8,
+            pg: svbool4_t,
+            base: *mut i8,
+            offsets: svint32_t,
+        );
+    }
+    _svst1b_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s32offset_s32(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16"
+        )]
+        fn _svst1h_scatter_s32offset_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            base: *mut i16,
+            offsets: svint32_t,
+        );
+    }
+    _svst1h_scatter_s32offset_s32(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_s32offset_u32(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svint32_t,
+    data: svuint32_t,
+) {
+    svst1b_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s32offset_u32(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svint32_t,
+    data: svuint32_t,
+) {
+    svst1h_scatter_s32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i8"
+        )]
+        fn _svst1b_scatter_s64offset_s64(
+            data: nxv2i8,
+            pg: svbool2_t,
+            base: *mut i8,
+            offsets: svint64_t,
+        );
+    }
+    _svst1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i16"
+        )]
+        fn _svst1h_scatter_s64offset_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            base: *mut i16,
+            offsets: svint64_t,
+        );
+    }
+    _svst1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.nxv2i32"
+        )]
+        fn _svst1w_scatter_s64offset_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            base: *mut i32,
+            offsets: svint64_t,
+        );
+    }
+    _svst1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8"
+        )]
+        fn _svst1b_scatter_u32offset_s32(
+            data: nxv4i8,
+            pg: svbool4_t,
+            base: *mut i8,
+            offsets: svint32_t,
+        );
+    }
+    _svst1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16"
+        )]
+        fn _svst1h_scatter_u32offset_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            base: *mut i16,
+            offsets: svint32_t,
+        );
+    }
+    _svst1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed())
+}
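+// Hedged note (editor's addition): the `uxtw` in the link names above indicates that each
+// 32-bit offset is zero-extended to 64 bits before being added to `base`, so these
+// offsets always act as non-negative byte counts. A minimal sketch, assuming the usual
+// ACLE helpers `svptrue_b32`, `svindex_u32` and `svdup_n_s32` exist in this module:
+//
+//     unsafe {
+//         let mut buf = [0i16; 64]; // two bytes per 32-bit lane, up to 64 lanes
+//         let pg = svptrue_b32();
+//         let offsets = svindex_u32(0, 2); // byte offsets 0, 2, 4, ...
+//         let data = svdup_n_s32(-1);
+//         svst1h_scatter_u32offset_s32(pg, buf.as_mut_ptr(), offsets, data);
+//     }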
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svst1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svst1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svst1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svst1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svst1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svst1b_scatter_u32base_offset_s32(
+            data: nxv4i8,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svst1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svst1h_scatter_u32base_offset_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svst1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svst1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svst1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svst1b_scatter_u64base_offset_s64(
+            data: nxv2i8,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svst1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svst1h_scatter_u64base_offset_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svst1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svst1w_scatter_u64base_offset_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svst1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svst1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svst1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svst1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
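+// Illustrative sketch (editor's addition): the `u64base` forms take a vector of absolute
+// addresses instead of a pointer, so the addresses are usually built by integer
+// arithmetic on an exposed pointer value (hence the provenance caveat above). The
+// helpers `svptrue_b64`, `svindex_u64`, `svadd_n_u64_x` and `svdup_n_u64` are assumed to
+// be the usual ACLE intrinsics defined elsewhere in this module.
+//
+//     unsafe {
+//         let mut buf = [0u32; 32]; // one 32-bit slot per 64-bit lane, up to 32 lanes
+//         let addr = buf.as_mut_ptr() as usize as u64;
+//         let pg = svptrue_b64();
+//         // Per-lane absolute addresses: addr, addr + 4, addr + 8, ...
+//         let bases = svadd_n_u64_x(pg, svindex_u64(0, 4), addr);
+//         let data = svdup_n_u64(0xDEAD_BEEF);
+//         svst1w_scatter_u64base_offset_u64(pg, bases, 0, data);
+//     }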
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svst1b_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svst1h_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svst1b_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svst1h_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svst1b_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svst1h_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svst1w_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svst1b_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svst1h_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svst1w_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svint16_t) {
+    svst1b_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
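+// Hedged sketch (editor's addition): `vnum` advances `base` by whole vectors' worth of
+// destination elements; for the 16-bit-source / 8-bit-destination form above that is
+// svcnth() bytes per unit of `vnum`, as the call above computes. `svptrue_b16` and
+// `svdup_n_s16` are assumed to be the usual ACLE intrinsics from this module.
+//
+//     unsafe {
+//         let mut buf = [0i8; 256]; // at least 2 * svcnth() bytes for any vector length
+//         let pg = svptrue_b16();
+//         let data = svdup_n_s16(0x0102);
+//         svst1b_vnum_s16(pg, buf.as_mut_ptr(), 0, data); // first vector-sized chunk
+//         svst1b_vnum_s16(pg, buf.as_mut_ptr(), 1, data); // the chunk right after it
+//     }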
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svint32_t) {
+    svst1b_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svint32_t) {
+    svst1h_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svint64_t) {
+    svst1b_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svint64_t) {
+    svst1h_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svint64_t) {
+    svst1w_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint16_t) {
+    svst1b_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint32_t) {
+    svst1b_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint32_t) {
+    svst1h_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 8 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1b_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1b))]
+pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint64_t) {
+    svst1b_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint64_t) {
+    svst1h_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint64_t) {
+    svst1w_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s32index_s32(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16"
+        )]
+        fn _svst1h_scatter_s32index_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            base: *mut i16,
+            indices: svint32_t,
+        );
+    }
+    _svst1h_scatter_s32index_s32(simd_cast(data), pg.into(), base, indices)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s32index_u32(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svint32_t,
+    data: svuint32_t,
+) {
+    svst1h_scatter_s32index_s32(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i16"
+        )]
+        fn _svst1h_scatter_s64index_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            base: *mut i16,
+            indices: svint64_t,
+        );
+    }
+    _svst1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.index.nxv2i32"
+        )]
+        fn _svst1w_scatter_s64index_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            base: *mut i32,
+            indices: svint64_t,
+        );
+    }
+    _svst1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svst1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svst1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32index_s32(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16"
+        )]
+        fn _svst1h_scatter_u32index_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            base: *mut i16,
+            indices: svint32_t,
+        );
+    }
+    _svst1h_scatter_u32index_s32(simd_cast(data), pg.into(), base, indices.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u32]index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32index_u32(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svuint32_t,
+    data: svuint32_t,
+) {
+    svst1h_scatter_u32index_s32(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svst1h_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svst1w_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svst1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svint32_t,
+) {
+    svst1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u32base]_index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svuint32_t,
+) {
+    svst1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svst1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svst1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Truncate to 16 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1h_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1h))]
+pub unsafe fn svst1h_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svst1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 32 bits and store"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst1w_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st1w))]
+pub unsafe fn svst1w_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svst1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data)
+}
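+// The `_index` scatter forms above are thin wrappers over the `_offset` forms: the element
+// index is converted to a byte offset by shifting left by log2(element size), hence
+// `unchecked_shl(1)` for the 16-bit (st1h) stores and `unchecked_shl(2)` for the 32-bit
+// (st1w) stores. As an illustrative, non-normative equivalence:
+//
+//     svst1h_scatter_u64base_index_u64(pg, bases, index, data)
+//         == svst1h_scatter_u64base_offset_u64(pg, bases, index * 2, data)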
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4f32")]
+        fn _svst2_f32(data0: svfloat32_t, data1: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
+    }
+    _svst2_f32(
+        svget2_f32::<0>(data),
+        svget2_f32::<1>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2f64")]
+        fn _svst2_f64(data0: svfloat64_t, data1: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
+    }
+    _svst2_f64(
+        svget2_f64::<0>(data),
+        svget2_f64::<1>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2b))]
+pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv16i8")]
+        fn _svst2_s8(data0: svint8_t, data1: svint8_t, pg: svbool_t, ptr: *mut i8);
+    }
+    _svst2_s8(svget2_s8::<0>(data), svget2_s8::<1>(data), pg, base)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2h))]
+pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv8i16")]
+        fn _svst2_s16(data0: svint16_t, data1: svint16_t, pg: svbool8_t, ptr: *mut i16);
+    }
+    _svst2_s16(
+        svget2_s16::<0>(data),
+        svget2_s16::<1>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv4i32")]
+        fn _svst2_s32(data0: svint32_t, data1: svint32_t, pg: svbool4_t, ptr: *mut i32);
+    }
+    _svst2_s32(
+        svget2_s32::<0>(data),
+        svget2_s32::<1>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st2.nxv2i64")]
+        fn _svst2_s64(data0: svint64_t, data1: svint64_t, pg: svbool2_t, ptr: *mut i64);
+    }
+    _svst2_s64(
+        svget2_s64::<0>(data),
+        svget2_s64::<1>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2b))]
+pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) {
+    svst2_s8(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2h))]
+pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) {
+    svst2_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) {
+    svst2_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) {
+    svst2_s64(pg, base.as_signed(), data.as_signed())
+}
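+// Illustrative usage sketch (not generated code): storing an interleaved pair of f32
+// vectors with `svst2_f32`. It assumes the companion ACLE intrinsics `svptrue_b32`,
+// `svdup_n_f32` and `svcreate2_f32` (named per the ACLE scheme) are available from the
+// same module; treat it as a hedged example only, given a sufficiently large `buf: &mut [f32]`:
+//
+//     let pg = svptrue_b32();
+//     let pair = svcreate2_f32(svdup_n_f32(1.0), svdup_n_f32(2.0));
+//     // Writes 1.0 and 2.0 interleaved; `buf` must hold at least 2 * svcntw() elements.
+//     svst2_f32(pg, buf.as_mut_ptr(), pair);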
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x2_t) {
+    svst2_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x2_t) {
+    svst2_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2b))]
+pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x2_t) {
+    svst2_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2h))]
+pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x2_t) {
+    svst2_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x2_t) {
+    svst2_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x2_t) {
+    svst2_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2b))]
+pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x2_t) {
+    svst2_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2h))]
+pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x2_t) {
+    svst2_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2w))]
+pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x2_t) {
+    svst2_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store two vectors into two-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst2_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st2d))]
+pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x2_t) {
+    svst2_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
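+// The `_vnum` forms above advance `base` by `vnum` vectors' worth of elements before
+// delegating, using the run-time element count (`svcntw()`, `svcnth()`, ...), so the caller
+// must ensure the whole scaled offset stays in bounds. Illustrative equivalence taken
+// directly from the wrapper bodies:
+//
+//     svst2_vnum_f32(pg, base, 1, data)
+//         == svst2_f32(pg, base.offset(svcntw() as isize), data)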
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4f32")]
+        fn _svst3_f32(
+            data0: svfloat32_t,
+            data1: svfloat32_t,
+            data2: svfloat32_t,
+            pg: svbool4_t,
+            ptr: *mut f32,
+        );
+    }
+    _svst3_f32(
+        svget3_f32::<0>(data),
+        svget3_f32::<1>(data),
+        svget3_f32::<2>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: svfloat64x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2f64")]
+        fn _svst3_f64(
+            data0: svfloat64_t,
+            data1: svfloat64_t,
+            data2: svfloat64_t,
+            pg: svbool2_t,
+            ptr: *mut f64,
+        );
+    }
+    _svst3_f64(
+        svget3_f64::<0>(data),
+        svget3_f64::<1>(data),
+        svget3_f64::<2>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3b))]
+pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv16i8")]
+        fn _svst3_s8(data0: svint8_t, data1: svint8_t, data2: svint8_t, pg: svbool_t, ptr: *mut i8);
+    }
+    _svst3_s8(
+        svget3_s8::<0>(data),
+        svget3_s8::<1>(data),
+        svget3_s8::<2>(data),
+        pg,
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3h))]
+pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv8i16")]
+        fn _svst3_s16(
+            data0: svint16_t,
+            data1: svint16_t,
+            data2: svint16_t,
+            pg: svbool8_t,
+            ptr: *mut i16,
+        );
+    }
+    _svst3_s16(
+        svget3_s16::<0>(data),
+        svget3_s16::<1>(data),
+        svget3_s16::<2>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv4i32")]
+        fn _svst3_s32(
+            data0: svint32_t,
+            data1: svint32_t,
+            data2: svint32_t,
+            pg: svbool4_t,
+            ptr: *mut i32,
+        );
+    }
+    _svst3_s32(
+        svget3_s32::<0>(data),
+        svget3_s32::<1>(data),
+        svget3_s32::<2>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st3.nxv2i64")]
+        fn _svst3_s64(
+            data0: svint64_t,
+            data1: svint64_t,
+            data2: svint64_t,
+            pg: svbool2_t,
+            ptr: *mut i64,
+        );
+    }
+    _svst3_s64(
+        svget3_s64::<0>(data),
+        svget3_s64::<1>(data),
+        svget3_s64::<2>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3b))]
+pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) {
+    svst3_s8(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3h))]
+pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) {
+    svst3_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) {
+    svst3_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) {
+    svst3_s64(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x3_t) {
+    svst3_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x3_t) {
+    svst3_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3b))]
+pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x3_t) {
+    svst3_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3h))]
+pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x3_t) {
+    svst3_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x3_t) {
+    svst3_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x3_t) {
+    svst3_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3b))]
+pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x3_t) {
+    svst3_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3h))]
+pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x3_t) {
+    svst3_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3w))]
+pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x3_t) {
+    svst3_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store three vectors into three-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst3_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st3d))]
+pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x3_t) {
+    svst3_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4f32")]
+        fn _svst4_f32(
+            data0: svfloat32_t,
+            data1: svfloat32_t,
+            data2: svfloat32_t,
+            data3: svfloat32_t,
+            pg: svbool4_t,
+            ptr: *mut f32,
+        );
+    }
+    _svst4_f32(
+        svget4_f32::<0>(data),
+        svget4_f32::<1>(data),
+        svget4_f32::<2>(data),
+        svget4_f32::<3>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2f64")]
+        fn _svst4_f64(
+            data0: svfloat64_t,
+            data1: svfloat64_t,
+            data2: svfloat64_t,
+            data3: svfloat64_t,
+            pg: svbool2_t,
+            ptr: *mut f64,
+        );
+    }
+    _svst4_f64(
+        svget4_f64::<0>(data),
+        svget4_f64::<1>(data),
+        svget4_f64::<2>(data),
+        svget4_f64::<3>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4b))]
+pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv16i8")]
+        fn _svst4_s8(
+            data0: svint8_t,
+            data1: svint8_t,
+            data2: svint8_t,
+            data3: svint8_t,
+            pg: svbool_t,
+            ptr: *mut i8,
+        );
+    }
+    _svst4_s8(
+        svget4_s8::<0>(data),
+        svget4_s8::<1>(data),
+        svget4_s8::<2>(data),
+        svget4_s8::<3>(data),
+        pg,
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4h))]
+pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv8i16")]
+        fn _svst4_s16(
+            data0: svint16_t,
+            data1: svint16_t,
+            data2: svint16_t,
+            data3: svint16_t,
+            pg: svbool8_t,
+            ptr: *mut i16,
+        );
+    }
+    _svst4_s16(
+        svget4_s16::<0>(data),
+        svget4_s16::<1>(data),
+        svget4_s16::<2>(data),
+        svget4_s16::<3>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv4i32")]
+        fn _svst4_s32(
+            data0: svint32_t,
+            data1: svint32_t,
+            data2: svint32_t,
+            data3: svint32_t,
+            pg: svbool4_t,
+            ptr: *mut i32,
+        );
+    }
+    _svst4_s32(
+        svget4_s32::<0>(data),
+        svget4_s32::<1>(data),
+        svget4_s32::<2>(data),
+        svget4_s32::<3>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.st4.nxv2i64")]
+        fn _svst4_s64(
+            data0: svint64_t,
+            data1: svint64_t,
+            data2: svint64_t,
+            data3: svint64_t,
+            pg: svbool2_t,
+            ptr: *mut i64,
+        );
+    }
+    _svst4_s64(
+        svget4_s64::<0>(data),
+        svget4_s64::<1>(data),
+        svget4_s64::<2>(data),
+        svget4_s64::<3>(data),
+        pg.into(),
+        base,
+    )
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4b))]
+pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) {
+    svst4_s8(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4h))]
+pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) {
+    svst4_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) {
+    svst4_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) {
+    svst4_s64(pg, base.as_signed(), data.as_signed())
+}
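+// All of the svst2/svst3/svst4 wrappers above follow the same shape: split the tuple with
+// the matching `svgetN_*::<i>` accessors, convert the all-lanes predicate to the
+// element-sized predicate type expected by the LLVM builtin via `pg.into()` (except for
+// 8-bit elements, where `svbool_t` is already the correct width), and forward to the
+// corresponding `llvm.aarch64.sve.stN.*` intrinsic.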
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32x4_t) {
+    svst4_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64x4_t) {
+    svst4_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4b))]
+pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8x4_t) {
+    svst4_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4h))]
+pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16x4_t) {
+    svst4_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32x4_t) {
+    svst4_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64x4_t) {
+    svst4_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4b))]
+pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8x4_t) {
+    svst4_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4h))]
+pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16x4_t) {
+    svst4_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4w))]
+pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32x4_t) {
+    svst4_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Store four vectors into four-element tuples"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svst4_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(st4d))]
+pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64x4_t) {
+    svst4_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4f32")]
+        fn _svstnt1_f32(data: svfloat32_t, pg: svbool4_t, ptr: *mut f32);
+    }
+    _svstnt1_f32(data, pg.into(), base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2f64")]
+        fn _svstnt1_f64(data: svfloat64_t, pg: svbool2_t, ptr: *mut f64);
+    }
+    _svstnt1_f64(data, pg.into(), base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv16i8")]
+        fn _svstnt1_s8(data: svint8_t, pg: svbool_t, ptr: *mut i8);
+    }
+    _svstnt1_s8(data, pg, base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv8i16")]
+        fn _svstnt1_s16(data: svint16_t, pg: svbool8_t, ptr: *mut i16);
+    }
+    _svstnt1_s16(data, pg.into(), base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv4i32")]
+        fn _svstnt1_s32(data: svint32_t, pg: svbool4_t, ptr: *mut i32);
+    }
+    _svstnt1_s32(data, pg.into(), base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.stnt1.nxv2i64")]
+        fn _svstnt1_s64(data: svint64_t, pg: svbool2_t, ptr: *mut i64);
+    }
+    _svstnt1_s64(data, pg.into(), base)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) {
+    svstnt1_s8(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) {
+    svstnt1_s16(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) {
+    svstnt1_s32(pg, base.as_signed(), data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) {
+    svstnt1_s64(pg, base.as_signed(), data.as_signed())
+}
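+// Illustrative sketch (not generated from the ACLE data; the function name is
+// hypothetical). A predicated non-temporal store of a splatted value: only the
+// lanes active in `pg` are written. Note the ordering caveat in the doc comments
+// above; non-temporal accesses may need explicit barriers.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_stnt1_splat(pg: svbool_t, dst: *mut f32) {
+    unsafe { svstnt1_f32(pg, dst, svdup_n_f32(1.0)) }
+}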
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfloat32_t) {
+    svstnt1_f32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfloat64_t) {
+    svstnt1_f64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8_t) {
+    svstnt1_s8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svint16_t) {
+    svstnt1_s16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svint32_t) {
+    svstnt1_s32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svint64_t) {
+    svstnt1_s64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint8_t) {
+    svstnt1_u8(pg, base.offset(svcntb() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svuint16_t) {
+    svstnt1_u16(pg, base.offset(svcnth() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svuint32_t) {
+    svstnt1_u32(pg, base.offset(svcntw() as isize * vnum as isize), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_vnum[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svuint64_t) {
+    svstnt1_u64(pg, base.offset(svcntd() as isize * vnum as isize), data)
+}
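+// Illustrative sketch (not generated from the ACLE data; names are hypothetical).
+// Streaming a buffer one vector-length block at a time: `vnum` counts whole
+// blocks of `svcntw()` f32 elements, so the pointer arithmetic stays in terms of
+// vectors rather than bytes. `pg`, `data`, and `blocks` come from the caller, and
+// `base` is assumed valid for `blocks * svcntw()` elements.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+unsafe fn example_stream_blocks(pg: svbool_t, base: *mut f32, blocks: i64, data: svfloat32_t) {
+    for vnum in 0..blocks {
+        unsafe { svstnt1_vnum_f32(pg, base, vnum, data) };
+    }
+}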
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv4f32")]
+        fn _svsub_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svsub_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsub_f32_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsub_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsub_f32_z(pg, op1, svdup_n_f32(op2))
+}
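+// Illustrative sketch (not generated from the ACLE data; the function name is
+// hypothetical). The three predication forms differ only in what the inactive
+// lanes hold: `_m` keeps `op1`, `_z` forces zero, and `_x` leaves them
+// unspecified (here it reuses the `_m` lowering). The scalar `_n_*` forms simply
+// splat `op2` with `svdup_n_f32` first, as the wrappers above show.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn example_sub_forms(pg: svbool_t, a: svfloat32_t, b: svfloat32_t) -> svfloat32_t {
+    let merged = svsub_f32_m(pg, a, b); // inactive lanes keep `a`
+    let zeroed = svsub_f32_z(pg, a, b); // inactive lanes become 0.0
+    let with_scalar = svsub_n_f32_x(pg, merged, 1.0); // scalar operand, inactive lanes unspecified
+    svsub_f32_x(pg, zeroed, with_scalar)
+}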
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsub.nxv2f64")]
+        fn _svsub_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svsub_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsub_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svsub_f64_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsub_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svsub_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsub))]
+pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsub_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv16i8")]
+        fn _svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svsub_s8_m(pg, op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsub_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svsub_s8_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsub_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsub_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv8i16")]
+        fn _svsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svsub_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsub_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svsub_s16_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsub_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsub_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv4i32")]
+        fn _svsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsub_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsub_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svsub_s32_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsub_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsub_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sub.nxv2i64")]
+        fn _svsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsub_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsub_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svsub_s64_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsub_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsub_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svsub_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsub_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svsub_u8_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsub_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsub_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svsub_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsub_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svsub_u16_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsub_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsub_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svsub_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsub_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svsub_u32_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsub_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsub_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svsub_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsub_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsub_u64_m(pg, op1, op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsub_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(sub))]
+pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsub_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv4f32")]
+        fn _svsubr_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svsubr_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsubr_f32_m(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsubr_f32_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsubr_f32_x(pg, op1, svdup_n_f32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svsubr_f32_m(pg, svsel_f32(pg, op1, svdup_n_f32(0.0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t {
+    svsubr_f32_z(pg, op1, svdup_n_f32(op2))
+}
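+// Illustrative sketch (not generated from the ACLE data; the function name is
+// hypothetical). "Subtract reversed" computes `op2 - op1`, so the scalar form is
+// a convenient way to write `constant - vector` without splatting by hand.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn example_one_minus_x(pg: svbool_t, x: svfloat32_t) -> svfloat32_t {
+    // Equivalent to svsub_f32_x(pg, svdup_n_f32(1.0), x) for the active lanes.
+    svsubr_n_f32_x(pg, x, 1.0)
+}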
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fsubr.nxv2f64")]
+        fn _svsubr_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svsubr_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsubr_f64_m(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svsubr_f64_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsubr_f64_x(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svsubr_f64_m(pg, svsel_f64(pg, op1, svdup_n_f64(0.0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(fsubr))]
+pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t {
+    svsubr_f64_z(pg, op1, svdup_n_f64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv16i8")]
+        fn _svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svsubr_s8_m(pg, op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsubr_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svsubr_s8_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsubr_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svsubr_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv8i16")]
+        fn _svsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svsubr_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsubr_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svsubr_s16_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsubr_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svsubr_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv4i32")]
+        fn _svsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsubr_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsubr_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svsubr_s32_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsubr_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svsubr_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subr.nxv2i64")]
+        fn _svsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsubr_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsubr_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svsubr_s64_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsubr_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svsubr_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svsubr_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsubr_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svsubr_u8_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsubr_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svsubr_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svsubr_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsubr_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svsubr_u16_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsubr_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svsubr_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svsubr_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsubr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svsubr_u32_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsubr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svsubr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svsubr_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsubr_u64_m(pg, op1, op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(subr))]
+pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svsubr_u64_z(pg, op1, svdup_n_u64(op2))
+}
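+// Editorial sketch, not part of the generated ACLE bindings: the `_m`, `_x`
+// and `_z` suffixes above pick the predication mode. Merging (`_m`) keeps
+// `op1` in inactive lanes, "don't care" (`_x`) leaves them unspecified, and
+// zeroing (`_z`) clears them (via `svsel` against a zero vector, as above).
+// The hypothetical helper below only illustrates how the variants are called.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn svsubr_predication_demo(pg: svbool_t, a: svint32_t) -> svint32_t {
+    // Merging: inactive lanes keep the corresponding lane of `a`.
+    let _merged = svsubr_n_s32_m(pg, a, 10);
+    // "Don't care": inactive lanes hold unspecified values.
+    let _relaxed = svsubr_n_s32_x(pg, a, 10);
+    // Zeroing: inactive lanes are set to zero.
+    svsubr_n_s32_z(pg, a, 10)
+}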
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))]
+pub fn svsudot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svuint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sudot.lane.nxv4i32"
+        )]
+        fn _svsudot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svsudot_lane_s32(op1, op2, op3.as_signed(), IMM_INDEX) }
+}
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t {
+    svusdot_s32(op1, op3, op2)
+}
+#[doc = "Dot product (signed × unsigned)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t {
+    svsudot_s32(op1, op2, svdup_n_u8(op3))
+}
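+// Editorial sketch, hypothetical helper: `svsudot_s32` above reuses USDOT by
+// swapping its last two operands, and the lane variant takes its quadword
+// index as a const generic, so it is written with a turbofish. Both calls
+// assume a target with SVE and I8MM.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,i8mm")]
+fn svsudot_demo(acc: svint32_t, signed: svint8_t, unsigned: svuint8_t) -> svint32_t {
+    // Dot product over the full vectors, accumulated into `acc`.
+    let acc = svsudot_s32(acc, signed, unsigned);
+    // Same operation, but `op3` is taken from quadword lane 0 of `unsigned`.
+    svsudot_lane_s32::<0>(acc, signed, unsigned)
+}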
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4f32")]
+        fn _svtbl_f32(data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svtbl_f32(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2f64")]
+        fn _svtbl_f64(data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svtbl_f64(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv16i8")]
+        fn _svtbl_s8(data: svint8_t, indices: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtbl_s8(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv8i16")]
+        fn _svtbl_s16(data: svint16_t, indices: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtbl_s16(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv4i32")]
+        fn _svtbl_s32(data: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtbl_s32(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_s64(data: svint64_t, indices: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl.nxv2i64")]
+        fn _svtbl_s64(data: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtbl_s64(data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t {
+    unsafe { svtbl_s8(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t {
+    unsafe { svtbl_s16(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svtbl_s32(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svtbl_s64(data.as_signed(), indices).as_unsigned() }
+}
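+// Editorial sketch, hypothetical helper: `svtbl` gathers `data[indices[i]]`
+// lane by lane, and out-of-range indices yield zero. Broadcasting one index
+// with `svdup_n_u32` (used throughout this file) therefore splats a single
+// element of `data` across the vector.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn svtbl_splat_lane0(data: svfloat32_t) -> svfloat32_t {
+    // Every lane looks up element 0 of `data`.
+    svtbl_f32(data, svdup_n_u32(0))
+}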
+#[doc = "Trigonometric multiply-add coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
+pub fn svtmad_f32<const IMM3: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM3, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftmad.x.nxv4f32"
+        )]
+        fn _svtmad_f32(op1: svfloat32_t, op2: svfloat32_t, imm3: i32) -> svfloat32_t;
+    }
+    unsafe { _svtmad_f32(op1, op2, IMM3) }
+}
+#[doc = "Trigonometric multiply-add coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))]
+pub fn svtmad_f64<const IMM3: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM3, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftmad.x.nxv2f64"
+        )]
+        fn _svtmad_f64(op1: svfloat64_t, op2: svfloat64_t, imm3: i32) -> svfloat64_t;
+    }
+    unsafe { _svtmad_f64(op1, op2, IMM3) }
+}
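+// Editorial sketch, hypothetical helper: the FTMAD coefficient index is a
+// const generic checked against 0..=7 by `static_assert_range!`, so it is
+// passed with a turbofish rather than as a runtime argument.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn svtmad_first_term(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    // Coefficient index 0; an index outside 0..=7 fails to compile.
+    svtmad_f64::<0>(op1, op2)
+}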
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i1")]
+        fn _svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svtrn1_b8(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i1")]
+        fn _svtrn1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svtrn1_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i1")]
+        fn _svtrn1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svtrn1_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i1")]
+        fn _svtrn1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svtrn1_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4f32")]
+        fn _svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svtrn1_f32(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2f64")]
+        fn _svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svtrn1_f64(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv16i8")]
+        fn _svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtrn1_s8(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv8i16")]
+        fn _svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtrn1_s16(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv4i32")]
+        fn _svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtrn1_s32(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1.nxv2i64")]
+        fn _svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtrn1_s64(op1, op2) }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svtrn1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svtrn1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svtrn1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svtrn1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4f32")]
+        fn _svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svtrn1q_f32(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2f64")]
+        fn _svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svtrn1q_f64(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv16i8")]
+        fn _svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtrn1q_s8(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv8i16")]
+        fn _svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtrn1q_s16(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv4i32")]
+        fn _svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtrn1q_s32(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn1q.nxv2i64")]
+        fn _svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtrn1q_s64(op1, op2) }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svtrn1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svtrn1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svtrn1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn1))]
+pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svtrn1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i1")]
+        fn _svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svtrn2_b8(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i1")]
+        fn _svtrn2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svtrn2_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i1")]
+        fn _svtrn2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svtrn2_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i1")]
+        fn _svtrn2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svtrn2_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4f32")]
+        fn _svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svtrn2_f32(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2f64")]
+        fn _svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svtrn2_f64(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv16i8")]
+        fn _svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtrn2_s8(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv8i16")]
+        fn _svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtrn2_s16(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv4i32")]
+        fn _svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtrn2_s32(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2.nxv2i64")]
+        fn _svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtrn2_s64(op1, op2) }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svtrn2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svtrn2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svtrn2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svtrn2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4f32")]
+        fn _svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svtrn2q_f32(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2f64")]
+        fn _svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svtrn2q_f64(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv16i8")]
+        fn _svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtrn2q_s8(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv8i16")]
+        fn _svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtrn2q_s16(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv4i32")]
+        fn _svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtrn2q_s32(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.trn2q.nxv2i64")]
+        fn _svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtrn2q_s64(op1, op2) }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svtrn2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svtrn2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svtrn2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(trn2))]
+pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svtrn2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
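+// Editorial sketch, hypothetical helper: `svtrn1` keeps the even-numbered
+// elements of each input and `svtrn2` the odd-numbered ones, interleaving
+// them pairwise, while the `q` variants above do the same at 128-bit
+// quadword granularity (and additionally require f64mm). For
+// a = [a0, a1, a2, ...] and b = [b0, b1, b2, ...]:
+//   svtrn1(a, b) -> [a0, b0, a2, b2, ...]
+//   svtrn2(a, b) -> [a1, b1, a3, b3, ...]
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn svtrn_roundtrip(a: svint32_t, b: svint32_t) -> svint32_t {
+    let even = svtrn1_s32(a, b); // [a0, b0, a2, b2, ...]
+    let odd = svtrn2_s32(a, b); // [a1, b1, a3, b3, ...]
+    // Transposing the two results again reconstructs `a`.
+    svtrn1_s32(even, odd)
+}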
+#[doc = "Trigonometric starting value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftsmul))]
+pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftsmul.x.nxv4f32"
+        )]
+        fn _svtsmul_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svtsmul_f32(op1, op2.as_signed()) }
+}
+#[doc = "Trigonometric starting value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftsmul))]
+pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftsmul.x.nxv2f64"
+        )]
+        fn _svtsmul_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svtsmul_f64(op1, op2.as_signed()) }
+}
+#[doc = "Trigonometric select coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftssel))]
+pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftssel.x.nxv4f32"
+        )]
+        fn _svtssel_f32(op1: svfloat32_t, op2: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svtssel_f32(op1, op2.as_signed()) }
+}
+#[doc = "Trigonometric select coefficient"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(ftssel))]
+pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ftssel.x.nxv2f64"
+        )]
+        fn _svtssel_f64(op1: svfloat64_t, op2: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svtssel_f64(op1, op2.as_signed()) }
+}
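+// Editorial sketch, hypothetical helper: FTSMUL and FTSSEL are the set-up
+// steps of the FTMAD-based sine/cosine sequence. Both take the value operand
+// as floats and the quadrant information as an unsigned integer vector; this
+// sketch only shows the calling shapes, not a complete approximation.
+#[allow(dead_code)]
+#[target_feature(enable = "sve")]
+fn trig_setup_demo(x: svfloat32_t, quadrant: svuint32_t) -> svfloat32_t {
+    // FTSMUL: the squared input, sign-adjusted by the quadrant bits.
+    let _start = svtsmul_f32(x, quadrant);
+    // FTSSEL: the initial coefficient (x or 1.0), again sign-adjusted.
+    svtssel_f32(x, quadrant)
+}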
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_f32() -> svfloat32x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_f64() -> svfloat64x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_s8() -> svint8x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_s16() -> svint16x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_s32() -> svint32x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_s64() -> svint64x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_u8() -> svuint8x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_u16() -> svuint16x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_u32() -> svuint32x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of two vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef2_u64() -> svuint64x2_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_f32() -> svfloat32x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_f64() -> svfloat64x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_s8() -> svint8x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_s16() -> svint16x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_s32() -> svint32x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_s64() -> svint64x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_u8() -> svuint8x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_u16() -> svuint16x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_u32() -> svuint32x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef3_u64() -> svuint64x3_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_f32() -> svfloat32x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_f64() -> svfloat64x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_s8() -> svint8x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_s16() -> svint16x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_s32() -> svint32x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_s64() -> svint64x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_u8() -> svuint8x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_u16() -> svuint16x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_u32() -> svuint32x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized tuple of four vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef4_u64() -> svuint64x4_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_f32() -> svfloat32_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_f64() -> svfloat64_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_s8() -> svint8_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_s16() -> svint16_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_s32() -> svint32_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_s64() -> svint64_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u8() -> svuint8_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u16() -> svuint16_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u32() -> svuint32_t {
+    simd_reinterpret(())
+}
+#[doc = "Create an uninitialized vector"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."]
+#[inline]
+#[target_feature(enable = "sve")]
+pub unsafe fn svundef_u64() -> svuint64_t {
+    simd_reinterpret(())
+}
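+// Illustrative sketch (editor's example, not generator output): an `svundefN_*`
+// tuple only reserves a value whose contents are undefined. Creating it is fine,
+// but every slice must be written (for instance with the `svsetN_*` intrinsics,
+// assumed to be generated elsewhere in this file) before any element is read.
+#[target_feature(enable = "sve")]
+pub unsafe fn reserve_triple_example() -> svint32x3_t {
+    // Only the reservation happens here; the caller must fully initialize all
+    // three slices before reading from the returned tuple.
+    svundef3_s32()
+}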
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))]
+pub fn svusdot_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svuint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.usdot.lane.nxv4i32"
+        )]
+        fn _svusdot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svusdot_lane_s32(op1, op2.as_signed(), op3, IMM_INDEX) }
+}
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usdot.nxv4i32")]
+        fn _svusdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svusdot_s32(op1, op2.as_signed(), op3) }
+}
+#[doc = "Dot product (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usdot))]
+pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t {
+    svusdot_s32(op1, op2, svdup_n_s8(op3))
+}
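+// Illustrative sketch (editor's example, not generator output): accumulate a
+// widening unsigned-by-signed dot product. `svdup_n_u8` and `svdup_n_s32` are
+// assumed to be defined elsewhere in this file, alongside the `svdup_n_s8`
+// already used by `svusdot_n_s32` above.
+#[target_feature(enable = "sve,i8mm")]
+pub fn usdot_accumulate_example() -> svint32_t {
+    let acc = svdup_n_s32(0); // running 32-bit accumulators
+    let data = svdup_n_u8(200); // unsigned 8-bit inputs
+    // Each 32-bit lane accumulates four 200 * -3 products.
+    svusdot_n_s32(acc, data, -3)
+}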
+#[doc = "Matrix multiply-accumulate (unsigned × signed)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,i8mm")]
+#[cfg_attr(test, assert_instr(usmmla))]
+pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usmmla.nxv4i32")]
+        fn _svusmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t;
+    }
+    unsafe { _svusmmla_s32(op1, op2.as_signed(), op3) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i1")]
+        fn _svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svuzp1_b8(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i1")]
+        fn _svuzp1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svuzp1_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i1")]
+        fn _svuzp1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svuzp1_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i1")]
+        fn _svuzp1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svuzp1_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4f32")]
+        fn _svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svuzp1_f32(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2f64")]
+        fn _svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svuzp1_f64(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv16i8")]
+        fn _svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuzp1_s8(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv8i16")]
+        fn _svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuzp1_s16(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv4i32")]
+        fn _svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuzp1_s32(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1.nxv2i64")]
+        fn _svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuzp1_s64(op1, op2) }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svuzp1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svuzp1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svuzp1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svuzp1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4f32")]
+        fn _svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svuzp1q_f32(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2f64")]
+        fn _svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svuzp1q_f64(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv16i8")]
+        fn _svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuzp1q_s8(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv8i16")]
+        fn _svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuzp1q_s16(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv4i32")]
+        fn _svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuzp1q_s32(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp1q.nxv2i64")]
+        fn _svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuzp1q_s64(op1, op2) }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svuzp1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svuzp1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svuzp1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate even quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp1))]
+pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svuzp1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i1")]
+        fn _svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svuzp2_b8(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i1")]
+        fn _svuzp2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svuzp2_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i1")]
+        fn _svuzp2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svuzp2_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i1")]
+        fn _svuzp2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svuzp2_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4f32")]
+        fn _svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svuzp2_f32(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2f64")]
+        fn _svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svuzp2_f64(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv16i8")]
+        fn _svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuzp2_s8(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv8i16")]
+        fn _svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuzp2_s16(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv4i32")]
+        fn _svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuzp2_s32(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2.nxv2i64")]
+        fn _svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuzp2_s64(op1, op2) }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svuzp2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svuzp2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svuzp2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd elements from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svuzp2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
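+// Illustrative sketch (editor's example, not generator output): `uzp1` and `uzp2`
+// together de-interleave packed pairs. Given two vectors holding
+// (even, odd, even, odd, ...) data, `svuzp1_f32` gathers the even-indexed elements
+// and `svuzp2_f32` the odd-indexed ones; only intrinsics defined in this file are used.
+#[target_feature(enable = "sve")]
+pub fn deinterleave_even_f32(lo: svfloat32_t, hi: svfloat32_t) -> svfloat32_t {
+    // The matching odd-element stream would be `svuzp2_f32(lo, hi)`.
+    svuzp1_f32(lo, hi)
+}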
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4f32")]
+        fn _svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svuzp2q_f32(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2f64")]
+        fn _svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svuzp2q_f64(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv16i8")]
+        fn _svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuzp2q_s8(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv8i16")]
+        fn _svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuzp2q_s16(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv4i32")]
+        fn _svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuzp2q_s32(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uzp2q.nxv2i64")]
+        fn _svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuzp2q_s64(op1, op2) }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svuzp2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svuzp2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svuzp2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Concatenate odd quadwords from two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(uzp2))]
+pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svuzp2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv16i1.i32"
+        )]
+        fn _svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilele_b8_s32(op1, op2) }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv8i1.i32"
+        )]
+        fn _svwhilele_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilele_b16_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv4i1.i32"
+        )]
+        fn _svwhilele_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilele_b32_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv2i1.i32"
+        )]
+        fn _svwhilele_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilele_b64_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv16i1.i64"
+        )]
+        fn _svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilele_b8_s64(op1, op2) }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv8i1.i64"
+        )]
+        fn _svwhilele_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilele_b16_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv4i1.i64"
+        )]
+        fn _svwhilele_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilele_b32_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilele))]
+pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilele.nxv2i1.i64"
+        )]
+        fn _svwhilele_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilele_b64_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv16i1.i32"
+        )]
+        fn _svwhilele_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilele_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv8i1.i32"
+        )]
+        fn _svwhilele_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilele_b16_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv4i1.i32"
+        )]
+        fn _svwhilele_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilele_b32_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv2i1.i32"
+        )]
+        fn _svwhilele_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilele_b64_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv16i1.i64"
+        )]
+        fn _svwhilele_b8_u64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilele_b8_u64(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv8i1.i64"
+        )]
+        fn _svwhilele_b16_u64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilele_b16_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv4i1.i64"
+        )]
+        fn _svwhilele_b32_u64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilele_b32_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilels))]
+pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilels.nxv2i1.i64"
+        )]
+        fn _svwhilele_b64_u64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilele_b64_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i32"
+        )]
+        fn _svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilelt_b8_s32(op1, op2) }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i32"
+        )]
+        fn _svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilelt_b16_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i32"
+        )]
+        fn _svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilelt_b32_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i32"
+        )]
+        fn _svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilelt_b64_s32(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv16i1.i64"
+        )]
+        fn _svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilelt_b8_s64(op1, op2) }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv8i1.i64"
+        )]
+        fn _svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilelt_b16_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv4i1.i64"
+        )]
+        fn _svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilelt_b32_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelt))]
+pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelt.nxv2i1.i64"
+        )]
+        fn _svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilelt_b64_s64(op1, op2).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i32"
+        )]
+        fn _svwhilelt_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilelt_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i32"
+        )]
+        fn _svwhilelt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilelt_b16_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i32"
+        )]
+        fn _svwhilelt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilelt_b32_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i32"
+        )]
+        fn _svwhilelt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilelt_b64_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv16i1.i64"
+        )]
+        fn _svwhilelt_b8_u64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilelt_b8_u64(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv8i1.i64"
+        )]
+        fn _svwhilelt_b16_u64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilelt_b16_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv4i1.i64"
+        )]
+        fn _svwhilelt_b32_u64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilelt_b32_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While incrementing scalar is less than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(whilelo))]
+pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilelo.nxv2i1.i64"
+        )]
+        fn _svwhilelt_b64_u64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilelt_b64_u64(op1.as_signed(), op2.as_signed()).into() }
+}
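+// Illustrative sketch (editor's example, not generator output): `whilelt` is the
+// usual way to build the predicate for a strip-mined loop - lanes whose index is
+// still below `n` are active, the rest inactive - without knowing the vector
+// length at compile time. Only intrinsics defined in this file are used.
+#[target_feature(enable = "sve")]
+pub fn first_n_active_b32(n: i32) -> svbool_t {
+    // Active exactly while the incrementing counter 0, 1, 2, ... is less than `n`.
+    svwhilelt_b32_s32(0, n)
+}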
+#[doc = "Write to the first-fault register"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(wrffr))]
+pub fn svwrffr(op: svbool_t) {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.wrffr")]
+        fn _svwrffr(op: svbool_t);
+    }
+    unsafe { _svwrffr(op) }
+}
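+// Illustrative sketch (editor's example, not generator output): `svwrffr`
+// overwrites the first-fault register, e.g. to restore a value previously captured
+// with `svrdffr` (assumed to be generated elsewhere in this file) around a
+// first-faulting load sequence. Here a predicate built with `svwhilelt_b8_s32`
+// seeds the FFR directly.
+#[target_feature(enable = "sve")]
+pub fn seed_ffr_first_n(n: i32) {
+    // Lanes at index `n` and beyond start out as "already faulted".
+    svwrffr(svwhilelt_b8_s32(0, n));
+}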
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i1")]
+        fn _svzip1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svzip1_b8(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i1")]
+        fn _svzip1_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svzip1_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i1")]
+        fn _svzip1_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svzip1_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i1")]
+        fn _svzip1_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svzip1_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4f32")]
+        fn _svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svzip1_f32(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2f64")]
+        fn _svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svzip1_f64(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv16i8")]
+        fn _svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svzip1_s8(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv8i16")]
+        fn _svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svzip1_s16(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv4i32")]
+        fn _svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svzip1_s32(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1.nxv2i64")]
+        fn _svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svzip1_s64(op1, op2) }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
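+    // ZIP1 only rearranges lanes, so the unsigned variant reinterprets its operands
+    // and reuses the signed implementation.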
+    unsafe { svzip1_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svzip1_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svzip1_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svzip1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
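+    // The quadword (128-bit granule) permutes are part of FEAT_F64MM, hence the extra
+    // "f64mm" target feature on the svzip1q/svzip2q family.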
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4f32")]
+        fn _svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svzip1q_f32(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2f64")]
+        fn _svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svzip1q_f64(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv16i8")]
+        fn _svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svzip1q_s8(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv8i16")]
+        fn _svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svzip1q_s16(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv4i32")]
+        fn _svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svzip1q_s32(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip1q.nxv2i64")]
+        fn _svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svzip1q_s64(op1, op2) }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svzip1q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svzip1q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svzip1q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from low halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip1))]
+pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svzip1q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i1")]
+        fn _svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t;
+    }
+    unsafe { _svzip2_b8(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i1")]
+        fn _svzip2_b16(op1: svbool8_t, op2: svbool8_t) -> svbool8_t;
+    }
+    unsafe { _svzip2_b16(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i1")]
+        fn _svzip2_b32(op1: svbool4_t, op2: svbool4_t) -> svbool4_t;
+    }
+    unsafe { _svzip2_b32(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i1")]
+        fn _svzip2_b64(op1: svbool2_t, op2: svbool2_t) -> svbool2_t;
+    }
+    unsafe { _svzip2_b64(op1.into(), op2.into()).into() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4f32")]
+        fn _svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svzip2_f32(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2f64")]
+        fn _svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svzip2_f64(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv16i8")]
+        fn _svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svzip2_s8(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv8i16")]
+        fn _svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svzip2_s16(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv4i32")]
+        fn _svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svzip2_s32(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2.nxv2i64")]
+        fn _svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svzip2_s64(op1, op2) }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svzip2_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svzip2_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svzip2_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave elements from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svzip2_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4f32")]
+        fn _svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svzip2q_f32(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2f64")]
+        fn _svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svzip2q_f64(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv16i8")]
+        fn _svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svzip2q_s8(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv8i16")]
+        fn _svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svzip2q_s16(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv4i32")]
+        fn _svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svzip2q_s32(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.zip2q.nxv2i64")]
+        fn _svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svzip2q_s64(op1, op2) }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svzip2q_s8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svzip2q_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svzip2q_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleave quadwords from high halves of two inputs"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,f64mm")]
+#[cfg_attr(test, assert_instr(zip2))]
+pub fn svzip2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svzip2q_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
diff --git a/crates/core_arch/src/aarch64/sve/sve2.rs b/crates/core_arch/src/aarch64/sve/sve2.rs
new file mode 100644
index 0000000000..d5ea52045d
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/sve2.rs
@@ -0,0 +1,24008 @@
+// This code is automatically generated. DO NOT MODIFY.
+//
+// Instead, modify `crates/stdarch-gen2/spec/` and run the following command to re-generate this file:
+//
+// ```
+// cargo run --bin=stdarch-gen2 -- crates/stdarch-gen2/spec
+// ```
+#![allow(improper_ctypes)]
+
+#[cfg(test)]
+use stdarch_test::assert_instr;
+
+use super::*;
+
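+// Implementation note: each intrinsic below binds its LLVM counterpart in a nested
+// `extern "C"` block (via `link_name`) and forwards to it; unsigned and predicate
+// arguments are reinterpreted into the signed / width-specific types used by the LLVM
+// signatures (`as_signed()`, `.into()`).
+//
+// Illustrative usage sketch (`a` and `b` are placeholder svint8_t values; assumes SVE2
+// is available at runtime):
+//
+//     // let acc = svdup_n_s16(0);
+//     // let acc = svabalb_s16(acc, a, b); // acc + |a - b| over even byte lanes, widened
+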
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv16i8")]
+        fn _svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaba_s8(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
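+    // The `_n` variants splat the scalar operand with the corresponding `svdup_n_*`
+    // call and defer to the vector-vector form.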
+    svaba_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv8i16")]
+        fn _svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svaba_s16(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svaba_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv4i32")]
+        fn _svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svaba_s32(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svaba_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saba.nxv2i64")]
+        fn _svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svaba_s64(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saba))]
+pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svaba_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv16i8")]
+        fn _svaba_u8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaba_u8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svaba_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv8i16")]
+        fn _svaba_u16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svaba_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svaba_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv4i32")]
+        fn _svaba_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svaba_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svaba_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaba.nxv2i64")]
+        fn _svaba_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svaba_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaba))]
+pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svaba_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv8i16")]
+        fn _svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabalb_s16(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svabalb_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv4i32")]
+        fn _svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabalb_s32(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svabalb_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalb.nxv2i64")]
+        fn _svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabalb_s64(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalb))]
+pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svabalb_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv8i16")]
+        fn _svabalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svabalb_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv4i32")]
+        fn _svabalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svabalb_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalb.nxv2i64")]
+        fn _svabalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalb))]
+pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svabalb_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv8i16")]
+        fn _svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabalt_s16(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svabalt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv4i32")]
+        fn _svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabalt_s32(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svabalt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabalt.nxv2i64")]
+        fn _svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabalt_s64(op1, op2, op3) }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabalt))]
+pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svabalt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv8i16")]
+        fn _svabalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svabalt_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv4i32")]
+        fn _svabalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svabalt_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabalt.nxv2i64")]
+        fn _svabalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference and accumulate long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabalt))]
+pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svabalt_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv8i16")]
+        fn _svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabdlb_s16(op1, op2) }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svabdlb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv4i32")]
+        fn _svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabdlb_s32(op1, op2) }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svabdlb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlb.nxv2i64")]
+        fn _svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabdlb_s64(op1, op2) }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlb))]
+pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svabdlb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv8i16")]
+        fn _svabdlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabdlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svabdlb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv4i32")]
+        fn _svabdlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabdlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svabdlb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlb.nxv2i64")]
+        fn _svabdlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabdlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlb))]
+pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svabdlb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv8i16")]
+        fn _svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabdlt_s16(op1, op2) }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svabdlt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv4i32")]
+        fn _svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabdlt_s32(op1, op2) }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svabdlt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sabdlt.nxv2i64")]
+        fn _svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabdlt_s64(op1, op2) }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sabdlt))]
+pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svabdlt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv8i16")]
+        fn _svabdlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svabdlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svabdlt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv4i32")]
+        fn _svabdlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svabdlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svabdlt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uabdlt.nxv2i64")]
+        fn _svabdlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svabdlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Absolute difference long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uabdlt))]
+pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svabdlt_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv8i16")]
+        fn _svadalp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svadalp_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
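+    // _x ("don't care") predication: inactive lanes may hold any value, so the merging
+    // form is reused directly.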
+    svadalp_s16_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t {
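+    // Zeroing predication: inactive lanes of op1 are cleared (svsel against zero)
+    // before calling the merging form.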
+    svadalp_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv4i32")]
+        fn _svadalp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svadalp_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
+    svadalp_s32_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t {
+    svadalp_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sadalp.nxv2i64")]
+        fn _svadalp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svadalp_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
+    svadalp_s64_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sadalp))]
+pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t {
+    svadalp_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv8i16")]
+        fn _svadalp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svadalp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    svadalp_u16_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    svadalp_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv4i32")]
+        fn _svadalp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svadalp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    svadalp_u32_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    svadalp_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uadalp.nxv2i64")]
+        fn _svadalp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svadalp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    svadalp_u64_m(pg, op1, op2)
+}
+#[doc = "Add and accumulate long pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uadalp))]
+pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    svadalp_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Add with carry long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclb))]
+pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv4i32")]
+        fn _svadclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Add with carry long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclb))]
+pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svadclb_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Add with carry long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclb))]
+pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclb.nxv2i64")]
+        fn _svadclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Add with carry long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclb))]
+pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svadclb_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Add with carry long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclt))]
+pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv4i32")]
+        fn _svadclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svadclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Add with carry long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclt))]
+pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svadclt_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Add with carry long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclt))]
+pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.adclt.nxv2i64")]
+        fn _svadclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svadclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Add with carry long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(adclt))]
+pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svadclt_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv8i16")]
+        fn _svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svaddhnb_s16(op1, op2) }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
+    svaddhnb_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv4i32")]
+        fn _svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svaddhnb_s32(op1, op2) }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
+    svaddhnb_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnb.nxv2i64")]
+        fn _svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svaddhnb_s64(op1, op2) }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
+    svaddhnb_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svaddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
+    svaddhnb_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svaddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
+    svaddhnb_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svaddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnb))]
+pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
+    svaddhnb_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv8i16")]
+        fn _svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svaddhnt_s16(even, op1, op2) }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
+    svaddhnt_s16(even, op1, svdup_n_s16(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv4i32")]
+        fn _svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svaddhnt_s32(even, op1, op2) }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
+    svaddhnt_s32(even, op1, svdup_n_s32(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addhnt.nxv2i64")]
+        fn _svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svaddhnt_s64(even, op1, op2) }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
+    svaddhnt_s64(even, op1, svdup_n_s64(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svaddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
+    svaddhnt_u16(even, op1, svdup_n_u16(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svaddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
+    svaddhnt_u32(even, op1, svdup_n_u32(op2))
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svaddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addhnt))]
+pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
+    svaddhnt_u64(even, op1, svdup_n_u64(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv8i16")]
+        fn _svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddlb_s16(op1, op2) }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svaddlb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv4i32")]
+        fn _svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddlb_s32(op1, op2) }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svaddlb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlb.nxv2i64")]
+        fn _svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddlb_s64(op1, op2) }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlb))]
+pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svaddlb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv8i16")]
+        fn _svaddlb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddlb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svaddlb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv4i32")]
+        fn _svaddlb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddlb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svaddlb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlb.nxv2i64")]
+        fn _svaddlb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddlb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlb))]
+pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svaddlb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.saddlbt.nxv8i16"
+        )]
+        fn _svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddlbt_s16(op1, op2) }
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svaddlbt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.saddlbt.nxv4i32"
+        )]
+        fn _svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddlbt_s32(op1, op2) }
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svaddlbt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.saddlbt.nxv2i64"
+        )]
+        fn _svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddlbt_s64(op1, op2) }
+}
+#[doc = "Add long (bottom + top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlbt))]
+pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svaddlbt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv8i16")]
+        fn _svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddlt_s16(op1, op2) }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svaddlt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv4i32")]
+        fn _svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddlt_s32(op1, op2) }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svaddlt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddlt.nxv2i64")]
+        fn _svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddlt_s64(op1, op2) }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddlt))]
+pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svaddlt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv8i16")]
+        fn _svaddlt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddlt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svaddlt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv4i32")]
+        fn _svaddlt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddlt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svaddlt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddlt.nxv2i64")]
+        fn _svaddlt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddlt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddlt))]
+pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svaddlt_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv4f32")]
+        fn _svaddp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svaddp_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svaddp_f32_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.faddp.nxv2f64")]
+        fn _svaddp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svaddp_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(faddp))]
+pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svaddp_f64_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv16i8")]
+        fn _svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaddp_s8_m(pg, op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svaddp_s8_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv8i16")]
+        fn _svaddp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svaddp_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svaddp_s16_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv4i32")]
+        fn _svaddp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svaddp_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svaddp_s32_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.addp.nxv2i64")]
+        fn _svaddp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svaddp_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svaddp_s64_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svaddp_s8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svaddp_u8_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { svaddp_s16_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svaddp_u16_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svaddp_s32_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svaddp_u32_m(pg, op1, op2)
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svaddp_s64_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(addp))]
+pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svaddp_u64_m(pg, op1, op2)
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv8i16")]
+        fn _svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddwb_s16(op1, op2) }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
+    svaddwb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv4i32")]
+        fn _svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddwb_s32(op1, op2) }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
+    svaddwb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwb.nxv2i64")]
+        fn _svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddwb_s64(op1, op2) }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwb))]
+pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
+    svaddwb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv8i16")]
+        fn _svaddwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
+    svaddwb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv4i32")]
+        fn _svaddwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
+    svaddwb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwb.nxv2i64")]
+        fn _svaddwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwb))]
+pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
+    svaddwb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv8i16")]
+        fn _svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddwt_s16(op1, op2) }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
+    svaddwt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv4i32")]
+        fn _svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddwt_s32(op1, op2) }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
+    svaddwt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.saddwt.nxv2i64")]
+        fn _svaddwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddwt_s64(op1, op2) }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(saddwt))]
+pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
+    svaddwt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv8i16")]
+        fn _svaddwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svaddwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
+    svaddwt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv4i32")]
+        fn _svaddwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svaddwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
+    svaddwt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uaddwt.nxv2i64")]
+        fn _svaddwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svaddwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Add wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uaddwt))]
+pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
+    svaddwt_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "AES single round decryption"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(aesd))]
+pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesd")]
+        fn _svaesd_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaesd_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "AES single round encryption"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(aese))]
+pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aese")]
+        fn _svaese_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaese_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "AES inverse mix columns"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(aesimc))]
+pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesimc")]
+        fn _svaesimc_u8(op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaesimc_u8(op.as_signed()).as_unsigned() }
+}
+#[doc = "AES mix columns"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(aesmc))]
+pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.aesmc")]
+        fn _svaesmc_u8(op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svaesmc_u8(op.as_signed()).as_unsigned() }
+}
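+// The `svaes*` helpers above additionally require the `sve2-aes` target
+// feature. Like their NEON counterparts, they treat the vector as a sequence
+// of independent 128-bit AES states, performing one round of
+// encryption/decryption or a (inverse) mix-columns step per 128-bit segment.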
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv16i8")]
+        fn _svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbcax_s8(op1, op2, op3) }
+}
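+// `svbcax` ("bitwise clear and exclusive OR") computes `op1 ^ (op2 & !op3)`
+// lane by lane. Illustrative scalar sketch (not part of the API):
+//     fn bcax(a: u8, b: u8, c: u8) -> u8 {
+//         a ^ (b & !c)
+//     }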
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svbcax_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv8i16")]
+        fn _svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbcax_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svbcax_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv4i32")]
+        fn _svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbcax_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svbcax_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bcax.nxv2i64")]
+        fn _svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbcax_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svbcax_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svbcax_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svbcax_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svbcax_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svbcax_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svbcax_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svbcax_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svbcax_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise clear and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bcax))]
+pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svbcax_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv16i8")]
+        fn _svbdep_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbdep_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
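+// `svbdep` is a per-element bit deposit, analogous to x86 PDEP: successive
+// low-order bits of `op1` are written to the result at the positions where
+// `op2` has a set bit, and all other result bits are cleared. For example
+// (scalar illustration): op1 = 0b11, op2 = 0b1001 -> 0b1001.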
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbdep_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv8i16")]
+        fn _svbdep_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbdep_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbdep_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv4i32")]
+        fn _svbdep_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbdep_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbdep_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bdep.x.nxv2i64")]
+        fn _svbdep_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbdep_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Scatter lower bits into positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bdep))]
+pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbdep_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv16i8")]
+        fn _svbext_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbext_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
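+// `svbext` is the per-element inverse of `svbdep`, analogous to x86 PEXT: the
+// bits of `op1` at positions where `op2` has a set bit are gathered into the
+// low-order bits of the result. For example (scalar illustration):
+// op1 = 0b1001, op2 = 0b1001 -> 0b11.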
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbext_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv8i16")]
+        fn _svbext_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbext_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbext_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv4i32")]
+        fn _svbext_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbext_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbext_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bext.x.nxv2i64")]
+        fn _svbext_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbext_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Gather lower bits from positions selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bext))]
+pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbext_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv16i8")]
+        fn _svbgrp_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbgrp_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
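+// `svbgrp` ("bit group") combines the two operations above: within each
+// element, the bits of `op1` selected by set bits in `op2` are packed together
+// at one end of the result and the remaining bits at the other end. This is a
+// paraphrase; see the Arm documentation linked above for the exact ordering.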
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svbgrp_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv8i16")]
+        fn _svbgrp_u16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbgrp_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t {
+    svbgrp_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv4i32")]
+        fn _svbgrp_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbgrp_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svbgrp_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bgrp.x.nxv2i64")]
+        fn _svbgrp_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbgrp_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Group bits to right or left as selected by bitmask"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-bitperm")]
+#[cfg_attr(test, assert_instr(bgrp))]
+pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svbgrp_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv16i8")]
+        fn _svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbsl1n_s8(op1, op2, op3) }
+}
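+// `svbsl1n` is the plain bitwise select (`svbsl`, further down in this file)
+// with the first operand inverted first; assuming the same operand roles, each
+// lane evaluates to `(!op1 & op3) | (op2 & !op3)`.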
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svbsl1n_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv8i16")]
+        fn _svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbsl1n_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svbsl1n_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv4i32")]
+        fn _svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbsl1n_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svbsl1n_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl1n.nxv2i64")]
+        fn _svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbsl1n_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svbsl1n_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svbsl1n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svbsl1n_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svbsl1n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svbsl1n_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svbsl1n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svbsl1n_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svbsl1n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with first input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl1n))]
+pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svbsl1n_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv16i8")]
+        fn _svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbsl2n_s8(op1, op2, op3) }
+}
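+// `svbsl2n` mirrors `svbsl1n` but inverts the second operand instead; under
+// the same assumption about operand roles, each lane evaluates to
+// `(op1 & op3) | (!op2 & !op3)`.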
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svbsl2n_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv8i16")]
+        fn _svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbsl2n_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svbsl2n_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv4i32")]
+        fn _svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbsl2n_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svbsl2n_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl2n.nxv2i64")]
+        fn _svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbsl2n_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svbsl2n_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svbsl2n_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svbsl2n_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svbsl2n_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svbsl2n_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svbsl2n_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svbsl2n_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svbsl2n_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select with second input inverted"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl2n))]
+pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svbsl2n_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv16i8")]
+        fn _svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svbsl_s8(op1, op2, op3) }
+}
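+// `svbsl` is a bitwise select with `op3` as the mask: each result lane is
+// `(op1 & op3) | (op2 & !op3)`, i.e. bits come from `op1` where the mask bit
+// is set and from `op2` where it is clear.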
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svbsl_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv8i16")]
+        fn _svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svbsl_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svbsl_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv4i32")]
+        fn _svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svbsl_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svbsl_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.bsl.nxv2i64")]
+        fn _svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svbsl_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svbsl_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svbsl_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svbsl_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svbsl_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(bsl))]
+pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svbsl_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv16i8")]
+        fn _svcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
+    }
+    unsafe { _svcadd_s8(op1, op2, IMM_ROTATION) }
+}
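+// `svcadd` treats each pair of adjacent lanes as a complex number (even lane =
+// real part, odd lane = imaginary part) and adds `op2` rotated by the chosen
+// angle. Roughly: with IMM_ROTATION = 90 the result is
+// (op1.re - op2.im, op1.im + op2.re), and with 270 the op2 signs flip.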
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv8i16")]
+        fn _svcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
+    }
+    unsafe { _svcadd_s16(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv4i32")]
+        fn _svcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
+    }
+    unsafe { _svcadd_s32(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cadd.x.nxv2i64")]
+        fn _svcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
+    }
+    unsafe { _svcadd_s64(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_u8<const IMM_ROTATION: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe { svcadd_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_u16<const IMM_ROTATION: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe { svcadd_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_u32<const IMM_ROTATION: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe { svcadd_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))]
+pub fn svcadd_u64<const IMM_ROTATION: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    unsafe { svcadd_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Complex dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcdot_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cdot.lane.nxv4i32"
+        )]
+        fn _svcdot_lane_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svcdot_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Complex dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcdot_lane_s64<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint64_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cdot.lane.nxv2i64"
+        )]
+        fn _svcdot_lane_s64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svcdot_lane_s64(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Complex dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
+pub fn svcdot_s32<const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv4i32")]
+        fn _svcdot_s32(
+            op1: svint32_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svcdot_s32(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex dot product"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))]
+pub fn svcdot_s64<const IMM_ROTATION: i32>(
+    op1: svint64_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cdot.nxv2i64")]
+        fn _svcdot_s64(
+            op1: svint64_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_rotation: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svcdot_s64(op1, op2, op3, IMM_ROTATION) }
+}
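+// The `svcdot` family accumulates complex dot products: adjacent narrow lanes
+// of `op2` and `op3` are paired up as complex values (even = real, odd =
+// imaginary), `op3` is rotated by IMM_ROTATION, and the products are summed
+// into the four-times-wider lanes of `op1`. In the `_lane` variants above,
+// IMM_INDEX selects which group of complex values from `op3` is broadcast and
+// reused; the exact grouping is described in the linked Arm documentation.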
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv8i16"
+        )]
+        fn _svcmla_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svcmla_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.cmla.lane.x.nxv4i32"
+        )]
+        fn _svcmla_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svcmla_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_u16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svuint16_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_lane_s16::<IMM_INDEX, IMM_ROTATION>(
+            op1.as_signed(),
+            op2.as_signed(),
+            op3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svcmla_lane_u32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svuint32_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_lane_s32::<IMM_INDEX, IMM_ROTATION>(
+            op1.as_signed(),
+            op2.as_signed(),
+            op3.as_signed(),
+        )
+        .as_unsigned()
+    }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv16i8")]
+        fn _svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t, imm_rotation: i32) -> svint8_t;
+    }
+    unsafe { _svcmla_s8(op1, op2, op3, IMM_ROTATION) }
+}
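+// `svcmla` is a complex multiply-accumulate over the same even/odd lane
+// pairing as `svcadd`: the complex product of `op2` and `op3` (with `op3`
+// rotated by IMM_ROTATION) is added to `op1`. The `_lane` forms earlier in
+// this group reuse a single complex value from `op3`, selected by IMM_INDEX,
+// across the vector.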
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_s16<const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv8i16")]
+        fn _svcmla_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svcmla_s16(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_s32<const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv4i32")]
+        fn _svcmla_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svcmla_s32(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_s64<const IMM_ROTATION: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.cmla.x.nxv2i64")]
+        fn _svcmla_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            imm_rotation: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svcmla_s64(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_u8<const IMM_ROTATION: i32>(
+    op1: svuint8_t,
+    op2: svuint8_t,
+    op3: svuint8_t,
+) -> svuint8_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_s8::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_u16<const IMM_ROTATION: i32>(
+    op1: svuint16_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint16_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_s16::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_u32<const IMM_ROTATION: i32>(
+    op1: svuint32_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_s32::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Complex multiply-add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))]
+pub fn svcmla_u64<const IMM_ROTATION: i32>(
+    op1: svuint64_t,
+    op2: svuint64_t,
+    op3: svuint64_t,
+) -> svuint64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    unsafe {
+        svcmla_s64::<IMM_ROTATION>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
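+// Illustrative usage sketch for the complex multiply-add intrinsics above: the
+// rotation (and, for the `_lane` forms, the lane index) is supplied as a const
+// generic, e.g. `svcmla_s16::<90>(acc, a, b)` or
+// `svcmla_lane_u32::<1, 270>(acc, a, b)`, where `acc`, `a` and `b` are
+// hypothetical vectors of the matching element type.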
+#[doc = "Up convert long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtlt))]
+pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtlt.f64f32")]
+        fn _svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool2_t, op: svfloat32_t)
+            -> svfloat64_t;
+    }
+    unsafe { _svcvtlt_f64_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Up convert long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtlt))]
+pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t {
+    unsafe { svcvtlt_f64_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Down convert and narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtnt))]
+pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtnt.f32f64")]
+        fn _svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvtnt_f32_f64_m(even, pg.into(), op) }
+}
+#[doc = "Down convert and narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtnt))]
+pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    svcvtnt_f32_f64_m(even, pg, op)
+}
+#[doc = "Down convert, rounding to odd"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtx))]
+pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtx.f32f64")]
+        fn _svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvtx_f32_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Down convert, rounding to odd"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtx))]
+pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    unsafe { svcvtx_f32_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Down convert, rounding to odd"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtx))]
+pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    svcvtx_f32_f64_m(svdup_n_f32(0.0), pg, op)
+}
+#[doc = "Down convert, rounding to odd (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtxnt))]
+pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fcvtxnt.f32f64")]
+        fn _svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool2_t, op: svfloat64_t) -> svfloat32_t;
+    }
+    unsafe { _svcvtxnt_f32_f64_m(even, pg.into(), op) }
+}
+#[doc = "Down convert, rounding to odd (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fcvtxnt))]
+pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> svfloat32_t {
+    svcvtxnt_f32_f64_m(even, pg, op)
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv16i8")]
+        fn _sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _sveor3_s8(op1, op2, op3) }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    sveor3_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv8i16")]
+        fn _sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _sveor3_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    sveor3_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv4i32")]
+        fn _sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _sveor3_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    sveor3_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eor3.nxv2i64")]
+        fn _sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _sveor3_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    sveor3_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { sveor3_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    sveor3_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { sveor3_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    sveor3_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { sveor3_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    sveor3_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { sveor3_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR of three vectors"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eor3))]
+pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    sveor3_u64(op1, op2, svdup_n_u64(op3))
+}
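+// Illustrative usage sketch: `sveor3_u32(a, b, c)` computes the element-wise
+// `a ^ b ^ c` with a single EOR3 instruction; the `_n_` forms take the last
+// operand as a scalar that is duplicated across all lanes (`a`, `b` and `c`
+// here are hypothetical vectors).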
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv16i8")]
+        fn _sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _sveorbt_s8(odd, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t {
+    sveorbt_s8(odd, op1, svdup_n_s8(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv8i16")]
+        fn _sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _sveorbt_s16(odd, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t {
+    sveorbt_s16(odd, op1, svdup_n_s16(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv4i32")]
+        fn _sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _sveorbt_s32(odd, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t {
+    sveorbt_s32(odd, op1, svdup_n_s32(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eorbt.nxv2i64")]
+        fn _sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _sveorbt_s64(odd, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t {
+    sveorbt_s64(odd, op1, svdup_n_s64(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { sveorbt_s8(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    sveorbt_u8(odd, op1, svdup_n_u8(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { sveorbt_s16(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    sveorbt_u16(odd, op1, svdup_n_u16(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { sveorbt_s32(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    sveorbt_u32(odd, op1, svdup_n_u32(op2))
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { sveorbt_s64(odd.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (bottom, top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eorbt))]
+pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    sveorbt_u64(odd, op1, svdup_n_u64(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv16i8")]
+        fn _sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _sveortb_s8(even, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t {
+    sveortb_s8(even, op1, svdup_n_s8(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv8i16")]
+        fn _sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _sveortb_s16(even, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t {
+    sveortb_s16(even, op1, svdup_n_s16(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv4i32")]
+        fn _sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _sveortb_s32(even, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t {
+    sveortb_s32(even, op1, svdup_n_s32(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.eortb.nxv2i64")]
+        fn _sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _sveortb_s64(even, op1, op2) }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t {
+    sveortb_s64(even, op1, svdup_n_s64(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { sveortb_s8(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    sveortb_u8(even, op1, svdup_n_u8(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    unsafe { sveortb_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    sveortb_u16(even, op1, svdup_n_u16(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { sveortb_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    sveortb_u32(even, op1, svdup_n_u32(op2))
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { sveortb_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Interleaving exclusive OR (top, bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(eortb))]
+pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    sveortb_u64(even, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv16i8")]
+        fn _svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhadd_s8_m(pg, op1, op2) }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhadd_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhadd_s8_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhadd_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhadd_s8_z(pg, op1, svdup_n_s8(op2))
+}
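+// Illustrative note on the predication suffixes used above: `_m` keeps
+// inactive lanes from `op1`, `_x` leaves them unspecified (and is forwarded to
+// the `_m` form here), and `_z` zeroes them by selecting zero into the inactive
+// lanes of `op1` before the merging call. For example, `svhadd_s8_z(pg, a, b)`
+// halving-adds only the lanes where `pg` is true and returns zero elsewhere
+// (`pg`, `a` and `b` being hypothetical values).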
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv8i16")]
+        fn _svhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhadd_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhadd_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhadd_s16_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhadd_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhadd_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv4i32")]
+        fn _svhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhadd_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhadd_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhadd_s32_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhadd_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhadd_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shadd.nxv2i64")]
+        fn _svhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhadd_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhadd_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhadd_s64_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhadd_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shadd))]
+pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhadd_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv16i8")]
+        fn _svhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhadd_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhadd_u8_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhadd_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhadd_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv8i16")]
+        fn _svhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhadd_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhadd_u16_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhadd_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhadd_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv4i32")]
+        fn _svhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhadd_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhadd_u32_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhadd_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhadd_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhadd.nxv2i64")]
+        fn _svhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhadd_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhadd_u64_m(pg, op1, op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhadd_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhadd))]
+pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhadd_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Count matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histcnt))]
+pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.histcnt.nxv4i32"
+        )]
+        fn _svhistcnt_s32_z(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhistcnt_s32_z(pg.into(), op1, op2).as_unsigned() }
+}
+#[doc = "Count matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histcnt))]
+pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.histcnt.nxv2i64"
+        )]
+        fn _svhistcnt_s64_z(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhistcnt_s64_z(pg.into(), op1, op2).as_unsigned() }
+}
+#[doc = "Count matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histcnt))]
+pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    unsafe { svhistcnt_s32_z(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Count matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histcnt))]
+pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svhistcnt_s64_z(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Count matching elements in 128-bit segments"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histseg))]
+pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.histseg.nxv16i8"
+        )]
+        fn _svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhistseg_s8(op1, op2).as_unsigned() }
+}
+#[doc = "Count matching elements in 128-bit segments"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(histseg))]
+pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    unsafe { svhistseg_s8(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv16i8")]
+        fn _svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhsub_s8_m(pg, op1, op2) }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsub_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhsub_s8_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsub_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsub_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv8i16")]
+        fn _svhsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhsub_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsub_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhsub_s16_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsub_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsub_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv4i32")]
+        fn _svhsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhsub_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsub_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhsub_s32_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsub_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsub_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsub.nxv2i64")]
+        fn _svhsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhsub_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsub_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhsub_s64_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsub_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsub_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv16i8")]
+        fn _svhsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsub_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhsub_u8_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsub_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsub_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv8i16")]
+        fn _svhsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsub_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhsub_u16_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsub_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsub_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv4i32")]
+        fn _svhsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsub_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhsub_u32_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsub_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsub_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsub.nxv2i64")]
+        fn _svhsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsub_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhsub_u64_m(pg, op1, op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsub_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Halving subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsub_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv16i8")]
+        fn _svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhsubr_s8_m(pg, op1, op2) }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsubr_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhsubr_s8_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsubr_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svhsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svhsubr_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv8i16")]
+        fn _svhsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhsubr_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsubr_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhsubr_s16_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsubr_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svhsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svhsubr_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv4i32")]
+        fn _svhsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhsubr_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsubr_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhsubr_s32_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsubr_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svhsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svhsubr_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shsubr.nxv2i64")]
+        fn _svhsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhsubr_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsubr_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhsubr_s64_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsubr_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svhsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shsub))]
+pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svhsubr_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv16i8")]
+        fn _svhsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svhsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsubr_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhsubr_u8_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsubr_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svhsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svhsubr_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv8i16")]
+        fn _svhsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svhsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsubr_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhsubr_u16_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsubr_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svhsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svhsubr_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv4i32")]
+        fn _svhsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svhsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsubr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhsubr_u32_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsubr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svhsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svhsubr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uhsubr.nxv2i64")]
+        fn _svhsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svhsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsubr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhsubr_u64_m(pg, op1, op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsubr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svhsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Halving subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uhsub))]
+pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svhsubr_u64_z(pg, op1, svdup_n_u64(op2))
+}
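+// A minimal usage sketch (not part of the generated bindings): the "reversed" forms
+// swap the operand roles, so for active lanes `svhsubr` computes the halved difference
+// `op2 - op1` where `svhsub` computes `op1 - op2`; the `_m`/`_x`/`_z` suffixes behave
+// as described for `svhsub` above.
+//
+//     let d  = svhsub_s8_m(pg, a, b);    // per active lane: halve(a - b)
+//     let dr = svhsubr_s8_m(pg, a, b);   // per active lane: halve(b - a)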
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2f64"
+        )]
+        fn _svldnt1_gather_s64index_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            indices: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldnt1_gather_s64index_f64(pg.into(), base, indices)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i64"
+        )]
+        fn _svldnt1_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            indices: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldnt1_gather_s64index_s64(pg.into(), base, indices)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldnt1_gather_s64index_s64(pg, base.as_signed(), indices).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64index_f64(
+    pg: svbool_t,
+    base: *const f64,
+    indices: svuint64_t,
+) -> svfloat64_t {
+    svldnt1_gather_s64index_f64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i64,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldnt1_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u64,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldnt1_gather_s64index_s64(pg, base.as_signed(), indices.as_signed()).as_unsigned()
+}
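+// A minimal usage sketch (not part of the generated bindings): the `index` gather
+// forms scale each element of `indices` by the element size, so active lane i loads
+// from `base.offset(indices[i])`. It assumes `svwhilelt_b64_s64` and `svindex_s64`
+// are available with their usual ACLE meaning:
+//
+//     let data = [1.0f64, 2.0, 3.0, 4.0];
+//     let pg = svwhilelt_b64_s64(0, data.len() as i64); // keep active lanes inside `data`
+//     let idx = svindex_s64(0, 1);                      // indices 0, 1, 2, ...
+//     // Safety: every active lane's address stays inside `data` and satisfies the
+//     // pointer::offset rules listed above.
+//     let v = unsafe { svldnt1_gather_s64index_f64(pg, data.as_ptr(), idx) };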
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svint64_t,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2f64"
+        )]
+        fn _svldnt1_gather_s64offset_f64(
+            pg: svbool2_t,
+            base: *const f64,
+            offsets: svint64_t,
+        ) -> svfloat64_t;
+    }
+    _svldnt1_gather_s64offset_f64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i64"
+        )]
+        fn _svldnt1_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i64,
+            offsets: svint64_t,
+        ) -> svint64_t;
+    }
+    _svldnt1_gather_s64offset_s64(pg.into(), base, offsets)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32offset_f32(
+    pg: svbool_t,
+    base: *const f32,
+    offsets: svuint32_t,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32"
+        )]
+        fn _svldnt1_gather_u32offset_f32(
+            pg: svbool4_t,
+            base: *const f32,
+            offsets: svint32_t,
+        ) -> svfloat32_t;
+    }
+    _svldnt1_gather_u32offset_f32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32"
+        )]
+        fn _svldnt1_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i32,
+            offsets: svint32_t,
+        ) -> svint32_t;
+    }
+    _svldnt1_gather_u32offset_s32(pg.into(), base, offsets.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldnt1_gather_u32offset_s32(pg, base.as_signed(), offsets).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64offset_f64(
+    pg: svbool_t,
+    base: *const f64,
+    offsets: svuint64_t,
+) -> svfloat64_t {
+    svldnt1_gather_s64offset_f64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i64,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u64,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1_gather_s64offset_s64(pg, base.as_signed(), offsets.as_signed()).as_unsigned()
+}
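+// A minimal usage sketch (not part of the generated bindings): unlike the `index`
+// forms above, the `offset` gather forms treat each element of `offsets` as a byte
+// offset from `base`, with no scaling by the element size. Reusing `pg` and `data`
+// from the sketch above:
+//
+//     // stride of one f64 element expressed in bytes
+//     let offs = svindex_s64(0, core::mem::size_of::<f64>() as i64);
+//     let v = unsafe { svldnt1_gather_s64offset_f64(pg, data.as_ptr(), offs) };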
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svfloat32_t {
+    svldnt1_gather_u32base_offset_f32(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldnt1_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldnt1_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svfloat64_t {
+    svldnt1_gather_u64base_offset_f64(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_index_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svfloat32_t {
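+    // `index` counts 32-bit elements; shift left by 2 (multiply by 4) to turn it into
+    // the byte offset expected by the `_offset` form.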
+    svldnt1_gather_u32base_offset_f32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldnt1_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldnt1_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_index_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svfloat64_t {
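+    // `index` counts 64-bit elements; shift left by 3 (multiply by 8) to turn it into
+    // the byte offset expected by the `_offset` form.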
+    svldnt1_gather_u64base_offset_f64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldnt1_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldnt1_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(3))
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_f32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_offset_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32"
+        )]
+        fn _svldnt1_gather_u32base_offset_f32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svfloat32_t;
+    }
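+    // Convert the predicate to its 32-bit-element form (`svbool4_t`) and reinterpret
+    // the unsigned bases as signed to match the LLVM intrinsic's signature.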
+    _svldnt1_gather_u32base_offset_f32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32"
+        )]
+        fn _svldnt1_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> svint32_t;
+    }
+    _svldnt1_gather_u32base_offset_s32(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
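+    // The unsigned form reuses the signed implementation; `as_unsigned` only
+    // reinterprets the loaded bits and performs no value conversion.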
+    svldnt1_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_f64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_offset_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64"
+        )]
+        fn _svldnt1_gather_u64base_offset_f64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svfloat64_t;
+    }
+    _svldnt1_gather_u64base_offset_f64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64"
+        )]
+        fn _svldnt1_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> svint64_t;
+    }
+    _svldnt1_gather_u64base_offset_s64(pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Unextended load, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1d))]
+pub unsafe fn svldnt1_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldnt1_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8"
+        )]
+        fn _svldnt1sb_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
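+    // The LLVM intrinsic gathers one 8-bit element per active lane (`nxv2i8`);
+    // `simd_cast` then sign-extends each lane to 64 bits.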
+    simd_cast(_svldnt1sb_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16"
+        )]
+        fn _svldnt1sh_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldnt1sh_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32"
+        )]
+        fn _svldnt1sw_gather_s64offset_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldnt1sw_gather_s64offset_s64(pg.into(), base, offsets))
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldnt1sb_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldnt1sh_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    svldnt1sw_gather_s64offset_s64(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8"
+        )]
+        fn _svldnt1sb_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
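+    // The gathered 8-bit lanes (`nxv4i8`) are sign-extended to 32 bits by `simd_cast`;
+    // the unsigned 32-bit offsets are reinterpreted as signed only to match the
+    // intrinsic's signature (the `uxtw` in its name reflects that they are still
+    // treated as unsigned).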
+    simd_cast(_svldnt1sb_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16"
+        )]
+        fn _svldnt1sh_gather_u32offset_s32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldnt1sh_gather_u32offset_s32(
+        pg.into(),
+        base,
+        offsets.as_signed(),
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldnt1sb_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    svldnt1sh_gather_u32offset_s32(pg, base, offsets).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svint64_t {
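+    // As with the unextended loads, the unsigned-offset form reinterprets the offsets
+    // and reuses the signed-offset implementation.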
+    svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i8,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1sb_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1sh_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const i32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1sw_gather_s64offset_s64(pg, base, offsets.as_signed()).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svldnt1sb_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
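+    // `simd_cast` sign-extends the gathered `nxv4i8` lanes (one byte per active
+    // element) to the 32-bit result lanes.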
+    simd_cast(_svldnt1sb_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svldnt1sh_gather_u32base_offset_s32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast(_svldnt1sh_gather_u32base_offset_s32(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svldnt1sb_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    svldnt1sh_gather_u32base_offset_s32(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svldnt1sb_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast(_svldnt1sb_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svldnt1sh_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldnt1sh_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svldnt1sw_gather_u64base_offset_s64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldnt1sw_gather_u64base_offset_s64(
+        pg.into(),
+        bases.as_signed(),
+        offset,
+    ))
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldnt1sb_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldnt1sh_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    svldnt1sw_gather_u64base_offset_s64(pg, bases, offset).as_unsigned()
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldnt1sb_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldnt1sh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldnt1sb_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldnt1sh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1sb_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1sh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1sw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sb_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sb))]
+pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1sb_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1sh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1sw_gather_u64base_offset_u64(pg, bases, 0)
+}
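+// The `[_u32base]`/`[_u64base]` forms above are convenience wrappers: each forwards to the
+// corresponding `_offset` variant with an offset of 0, so every active lane loads directly
+// from the absolute address held in `bases`. Sketch (illustrative only; `pg` and `bases`
+// come from the caller):
+//
+//     unsafe fn gather_sb_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+//         // Same as svldnt1sb_gather_u64base_offset_s64(pg, bases, 0).
+//         svldnt1sb_gather_u64base_s64(pg, bases)
+//     }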
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16"
+        )]
+        fn _svldnt1sh_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast(_svldnt1sh_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32"
+        )]
+        fn _svldnt1sw_gather_s64index_s64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast(_svldnt1sw_gather_s64index_s64(pg.into(), base, indices))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldnt1sh_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svint64_t,
+) -> svuint64_t {
+    svldnt1sw_gather_s64index_s64(pg, base, indices).as_unsigned()
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed())
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldnt1sh_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const i32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldnt1sw_gather_s64index_s64(pg, base, indices.as_signed()).as_unsigned()
+}
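+// In the `index` forms above, `indices` are counted in elements rather than bytes: lane `i`
+// loads from `base.offset(indices[i])`, where `base` is a typed pointer (`*const i16` or
+// `*const i32`), so the address calculation scales by the element size. The unsigned-index
+// variants simply reinterpret the indices via `as_signed()` before delegating. Sketch
+// (illustrative only; `pg`, `base` and `indices` come from the caller):
+//
+//     unsafe fn gather_sh_indexed(pg: svbool_t, base: *const i16, indices: svint64_t) -> svint64_t {
+//         // Lane i reads the i16 at base.offset(indices[i]) and sign-extends it to i64.
+//         svldnt1sh_gather_s64index_s64(pg, base, indices)
+//     }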
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldnt1sh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldnt1sh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldnt1sh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldnt1sw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sh))]
+pub unsafe fn svldnt1sh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldnt1sh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and sign-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1sw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1sw))]
+pub unsafe fn svldnt1sw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldnt1sw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
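+// For the base-vector `index` forms above, the scalar `index` is scaled to a byte offset
+// before delegating to the `_offset` form: a left shift by 1 (x2) for 16-bit data and by 2
+// (x4) for 32-bit data. Sketch (illustrative only):
+//
+//     unsafe fn gather_sw_at(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+//         // Loads the 32-bit element at index 3, i.e. byte offset 12, past each base address.
+//         svldnt1sw_gather_u64base_index_s64(pg, bases, 3)
+//     }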
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldnt1ub_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldnt1uh_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_s64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svint64_t {
+    svldnt1uw_gather_s64offset_u64(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i8"
+        )]
+        fn _svldnt1ub_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i8,
+            offsets: svint64_t,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svldnt1ub_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i16"
+        )]
+        fn _svldnt1uh_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            offsets: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldnt1uh_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_s64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.nxv2i32"
+        )]
+        fn _svldnt1uw_gather_s64offset_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            offsets: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldnt1uw_gather_s64offset_u64(pg.into(), base.as_signed(), offsets).as_unsigned(),
+    )
+}
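+// Zero-extension in the three functions above comes from the cast sequence rather than a
+// dedicated widening intrinsic: the LLVM gather returns narrow lanes (`nxv2i8`/`nxv2i16`/
+// `nxv2i32`), these are reinterpreted as unsigned with `as_unsigned()`, and `simd_cast`
+// then widens each unsigned lane to 64 bits, which is a zero-extension. For example, a
+// loaded byte of 0xFF becomes 0x0000_0000_0000_00FF, whereas the sign-extending
+// `svldnt1sb` forms would produce 0xFFFF_FFFF_FFFF_FFFF. Sketch (illustrative only):
+//
+//     unsafe fn gather_ub(pg: svbool_t, base: *const u8, offsets: svint64_t) -> svuint64_t {
+//         // Lane i reads the byte at base + offsets[i] (a byte offset) and zero-extends it.
+//         svldnt1ub_gather_s64offset_u64(pg, base, offsets)
+//     }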
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldnt1ub_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32offset_s32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svint32_t {
+    svldnt1uh_gather_u32offset_u32(pg, base, offsets).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i8"
+        )]
+        fn _svldnt1ub_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i8,
+            offsets: svint32_t,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svldnt1ub_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u32]offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32offset_u32(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint32_t,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16"
+        )]
+        fn _svldnt1uh_gather_u32offset_u32(
+            pg: svbool4_t,
+            base: *const i16,
+            offsets: svint32_t,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldnt1uh_gather_u32offset_u32(pg.into(), base.as_signed(), offsets.as_signed())
+            .as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64offset_s64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svint64_t {
+    svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed()).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u8,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1ub_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u16,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1uh_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64offset_u64(
+    pg: svbool_t,
+    base: *const u32,
+    offsets: svuint64_t,
+) -> svuint64_t {
+    svldnt1uw_gather_s64offset_u64(pg, base, offsets.as_signed())
+}
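+// The wrappers above show the general pattern for this family: one LLVM binding exists per
+// element width (the signed-offset, unsigned-return form), and the other offset/return-type
+// combinations are derived from it with `as_signed()`/`as_unsigned()` reinterpretations of
+// the offset vector and of the result. Sketch (illustrative only; arguments come from the
+// caller):
+//
+//     unsafe fn gather_uh(pg: svbool_t, base: *const u16, offsets: svuint64_t) -> svuint64_t {
+//         // Forwards to svldnt1uh_gather_s64offset_u64 with the offsets reinterpreted as i64.
+//         svldnt1uh_gather_u64offset_u64(pg, base, offsets)
+//     }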
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svldnt1ub_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svint32_t {
+    svldnt1uh_gather_u32base_offset_u32(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svldnt1ub_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i8;
+    }
+    simd_cast::<nxv4u8, _>(
+        _svldnt1ub_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_offset_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svldnt1uh_gather_u32base_offset_u32(
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        ) -> nxv4i16;
+    }
+    simd_cast::<nxv4u16, _>(
+        _svldnt1uh_gather_u32base_offset_u32(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldnt1ub_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldnt1uh_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svint64_t {
+    svldnt1uw_gather_u64base_offset_u64(pg, bases, offset).as_signed()
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svldnt1ub_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i8;
+    }
+    simd_cast::<nxv2u8, _>(
+        _svldnt1ub_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
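+// Note on the pattern used by the zero-extending gathers above: the LLVM
+// `ldnt1.gather.scalar.offset` intrinsic is bound at the narrow element type
+// (here `nxv2i8`), the result is reinterpreted as unsigned lanes, and
+// `simd_cast` widens each lane to 64 bits; because the source lanes are
+// unsigned, the widening is a zero-extension, matching the `ub`/`uh`/`uw`
+// semantics documented above.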
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svldnt1uh_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldnt1uh_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_offset_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svldnt1uw_gather_u64base_offset_u64(
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldnt1uw_gather_u64base_offset_u64(pg.into(), bases.as_signed(), offset).as_unsigned(),
+    )
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldnt1ub_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint32_t {
+    svldnt1uh_gather_u32base_offset_s32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldnt1ub_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuint32_t {
+    svldnt1uh_gather_u32base_offset_u32(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1ub_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1uh_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint64_t {
+    svldnt1uw_gather_u64base_offset_s64(pg, bases, 0)
+}
+#[doc = "Load 8-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1ub_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1b))]
+pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1ub_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1uh_gather_u64base_offset_u64(pg, bases, 0)
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svuint64_t {
+    svldnt1uw_gather_u64base_offset_u64(pg, bases, 0)
+}
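+// The `_gather[_uNNbase]` forms above are thin convenience wrappers: each one
+// forwards to the corresponding `_offset` intrinsic with an offset of 0, so
+// their safety requirements are exactly those of the offset variants.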
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svint64_t {
+    svldnt1uh_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_s64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svint64_t {
+    svldnt1uw_gather_s64index_u64(pg, base, indices).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i16"
+        )]
+        fn _svldnt1uh_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i16,
+            indices: svint64_t,
+        ) -> nxv2i16;
+    }
+    simd_cast::<nxv2u16, _>(
+        _svldnt1uh_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[s64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_s64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svint64_t,
+) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ldnt1.gather.index.nxv2i32"
+        )]
+        fn _svldnt1uw_gather_s64index_u64(
+            pg: svbool2_t,
+            base: *const i32,
+            indices: svint64_t,
+        ) -> nxv2i32;
+    }
+    simd_cast::<nxv2u32, _>(
+        _svldnt1uw_gather_s64index_u64(pg.into(), base.as_signed(), indices).as_unsigned(),
+    )
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64index_s64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svint64_t {
+    svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed()).as_signed()
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u16,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldnt1uh_gather_s64index_u64(pg, base, indices.as_signed())
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather_[u64]index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64index_u64(
+    pg: svbool_t,
+    base: *const u32,
+    indices: svuint64_t,
+) -> svuint64_t {
+    svldnt1uw_gather_s64index_u64(pg, base, indices.as_signed())
+}
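+// The `u64index` forms reinterpret the index vector with `as_signed()` and
+// forward to the `s64index` intrinsics. Unlike the `offset` forms (which take
+// byte offsets), the indices here are element counts, scaled by the element
+// size when the address for each lane is formed.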
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_s32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svint32_t {
+    svldnt1uh_gather_u32base_offset_s32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u32base]_index_u32)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+) -> svuint32_t {
+    svldnt1uh_gather_u32base_offset_u32(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldnt1uh_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_s64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svint64_t {
+    svldnt1uw_gather_u64base_offset_s64(pg, bases, index.unchecked_shl(2))
+}
+#[doc = "Load 16-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uh_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1h))]
+pub unsafe fn svldnt1uh_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldnt1uh_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(1))
+}
+#[doc = "Load 32-bit data and zero-extend, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svldnt1uw_gather[_u64base]_index_u64)"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ldnt1w))]
+pub unsafe fn svldnt1uw_gather_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+) -> svuint64_t {
+    svldnt1uw_gather_u64base_offset_u64(pg, bases, index.unchecked_shl(2))
+}
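+// The base-vector `_index` forms scale the element index into a byte offset
+// before delegating to the `_offset` intrinsic: a left shift by 1 for 16-bit
+// loads and by 2 for 32-bit loads, i.e. a multiply by the element size in
+// bytes.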
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv4f32")]
+        fn _svlogb_f32_m(inactive: svint32_t, pg: svbool4_t, op: svfloat32_t) -> svint32_t;
+    }
+    unsafe { _svlogb_f32_m(inactive, pg.into(), op) }
+}
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    unsafe { svlogb_f32_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t {
+    svlogb_f32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.flogb.nxv2f64")]
+        fn _svlogb_f64_m(inactive: svint64_t, pg: svbool2_t, op: svfloat64_t) -> svint64_t;
+    }
+    unsafe { _svlogb_f64_m(inactive, pg.into(), op) }
+}
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    unsafe { svlogb_f64_m(simd_reinterpret(op), pg, op) }
+}
+#[doc = "Base 2 logarithm as integer"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(flogb))]
+pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t {
+    svlogb_f64_m(svdup_n_s64(0), pg, op)
+}
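+// The `svlogb` predication variants follow the usual SVE convention: `_m`
+// takes an explicit `inactive` vector to merge into, `_x` reuses `op`
+// (reinterpreted to the integer result type) as a don't-care value for
+// inactive lanes, and `_z` passes a zero vector so inactive lanes are zeroed.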
+#[doc = "Detect any matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(match))]
+pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv16i8")]
+        fn _svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svmatch_s8(pg, op1, op2) }
+}
+#[doc = "Detect any matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(match))]
+pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.match.nxv8i16")]
+        fn _svmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svmatch_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Detect any matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(match))]
+pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    unsafe { svmatch_s8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Detect any matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(match))]
+pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    unsafe { svmatch_s16(pg, op1.as_signed(), op2.as_signed()) }
+}
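+// Element equality is sign-agnostic, so the unsigned `svmatch` wrappers simply
+// reinterpret their operands with `as_signed()` and call the signed
+// intrinsics.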
+#[doc = "Maximum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmp.nxv4f32"
+        )]
+        fn _svmaxnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmaxnmp_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmaxnmp_f32_m(pg, op1, op2)
+}
+#[doc = "Maximum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmaxnmp.nxv2f64"
+        )]
+        fn _svmaxnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmaxnmp_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxnmp))]
+pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmaxnmp_f64_m(pg, op1, op2)
+}
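+// For the SVE2 pairwise operations below (`svmaxnmp`, `svmaxp`, `svminnmp`,
+// `svminp`), the `_x` variants forward to the `_m` variants: `_x` leaves
+// inactive elements unspecified, so reusing the merging form is a valid
+// implementation.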
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv4f32")]
+        fn _svmaxp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svmaxp_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svmaxp_f32_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fmaxp.nxv2f64")]
+        fn _svmaxp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svmaxp_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmaxp))]
+pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svmaxp_f64_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv16i8")]
+        fn _svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmaxp_s8_m(pg, op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svmaxp_s8_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv8i16")]
+        fn _svmaxp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmaxp_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svmaxp_s16_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv4i32")]
+        fn _svmaxp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmaxp_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svmaxp_s32_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smaxp.nxv2i64")]
+        fn _svmaxp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmaxp_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smaxp))]
+pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svmaxp_s64_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv16i8")]
+        fn _svmaxp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svmaxp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svmaxp_u8_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv8i16")]
+        fn _svmaxp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svmaxp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svmaxp_u16_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv4i32")]
+        fn _svmaxp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svmaxp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svmaxp_u32_m(pg, op1, op2)
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umaxp.nxv2i64")]
+        fn _svmaxp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svmaxp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Maximum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umaxp))]
+pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svmaxp_u64_m(pg, op1, op2)
+}
+#[doc = "Minimum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminnmp))]
+pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fminnmp.nxv4f32"
+        )]
+        fn _svminnmp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svminnmp_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminnmp))]
+pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svminnmp_f32_m(pg, op1, op2)
+}
+#[doc = "Minimum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminnmp))]
+pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fminnmp.nxv2f64"
+        )]
+        fn _svminnmp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svminnmp_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum number pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminnmp))]
+pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svminnmp_f64_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminp))]
+pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv4f32")]
+        fn _svminp_f32_m(pg: svbool4_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t;
+    }
+    unsafe { _svminp_f32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminp))]
+pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    svminp_f32_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminp))]
+pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.fminp.nxv2f64")]
+        fn _svminp_f64_m(pg: svbool2_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t;
+    }
+    unsafe { _svminp_f64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fminp))]
+pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    svminp_f64_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv16i8")]
+        fn _svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svminp_s8_m(pg, op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svminp_s8_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv8i16")]
+        fn _svminp_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svminp_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svminp_s16_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv4i32")]
+        fn _svminp_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svminp_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svminp_s32_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sminp.nxv2i64")]
+        fn _svminp_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svminp_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sminp))]
+pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svminp_s64_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv16i8")]
+        fn _svminp_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    // The LLVM intrinsic is declared on signed vectors; as_signed/as_unsigned convert the operands and result.
+    unsafe { _svminp_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svminp_u8_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv8i16")]
+        fn _svminp_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svminp_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svminp_u16_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv4i32")]
+        fn _svminp_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svminp_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svminp_u32_m(pg, op1, op2)
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uminp.nxv2i64")]
+        fn _svminp_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svminp_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Minimum pairwise"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uminp))]
+pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svminp_u64_m(pg, op1, op2)
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    // The lane index is checked at compile time, then forwarded to the LLVM intrinsic.
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv8i16"
+        )]
+        fn _svmla_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svmla_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv4i32"
+        )]
+        fn _svmla_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmla_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mla.lane.nxv2i64"
+        )]
+        fn _svmla_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmla_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u16<const IMM_INDEX: i32>(
+    op1: svuint16_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe {
+        svmla_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe {
+        svmla_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add, addend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))]
+pub fn svmla_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint64_t,
+    op3: svuint64_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe {
+        svmla_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalb.lane.nxv4i32"
+        )]
+        fn _svmlalb_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalb.lane.nxv2i64"
+        )]
+        fn _svmlalb_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalb.lane.nxv4i32"
+        )]
+        fn _svmlalb_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlalb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))]
+pub fn svmlalb_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalb.lane.nxv2i64"
+        )]
+        fn _svmlalb_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlalb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv8i16")]
+        fn _svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlalb_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlalb_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv4i32")]
+        fn _svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalb_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svmlalb_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalb.nxv2i64")]
+        fn _svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlalb_s64(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalb))]
+pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svmlalb_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv8i16")]
+        fn _svmlalb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlalb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svmlalb_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv4i32")]
+        fn _svmlalb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlalb_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalb.nxv2i64")]
+        fn _svmlalb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlalb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalb))]
+pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlalb_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalt.lane.nxv4i32"
+        )]
+        fn _svmlalt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlalt.lane.nxv2i64"
+        )]
+        fn _svmlalt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalt.lane.nxv4i32"
+        )]
+        fn _svmlalt_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlalt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))]
+pub fn svmlalt_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlalt.lane.nxv2i64"
+        )]
+        fn _svmlalt_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlalt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv8i16")]
+        fn _svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlalt_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlalt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv4i32")]
+        fn _svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalt_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svmlalt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlalt.nxv2i64")]
+        fn _svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlalt_s64(op1, op2, op3) }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlalt))]
+pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svmlalt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv8i16")]
+        fn _svmlalt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlalt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svmlalt_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv4i32")]
+        fn _svmlalt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlalt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlalt_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlalt.nxv2i64")]
+        fn _svmlalt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlalt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlalt))]
+pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlalt_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mls.lane.nxv8i16"
+        )]
+        fn _svmls_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svmls_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mls.lane.nxv4i32"
+        )]
+        fn _svmls_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmls_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mls.lane.nxv2i64"
+        )]
+        fn _svmls_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmls_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_u16<const IMM_INDEX: i32>(
+    op1: svuint16_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe {
+        svmls_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe {
+        svmls_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract, minuend first"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))]
+pub fn svmls_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint64_t,
+    op3: svuint64_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe {
+        svmls_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))]
+pub fn svmlslb_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslb.lane.nxv4i32"
+        )]
+        fn _svmlslb_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlslb_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))]
+pub fn svmlslb_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslb.lane.nxv2i64"
+        )]
+        fn _svmlslb_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlslb_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))]
+pub fn svmlslb_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslb.lane.nxv4i32"
+        )]
+        fn _svmlslb_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlslb_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))]
+pub fn svmlslb_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslb.lane.nxv2i64"
+        )]
+        fn _svmlslb_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlslb_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv8i16")]
+        fn _svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlslb_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlslb_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv4i32")]
+        fn _svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslb_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svmlslb_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslb.nxv2i64")]
+        fn _svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlslb_s64(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslb))]
+pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svmlslb_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv8i16")]
+        fn _svmlslb_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlslb_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svmlslb_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv4i32")]
+        fn _svmlslb_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlslb_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslb.nxv2i64")]
+        fn _svmlslb_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlslb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslb))]
+pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlslb_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslt.lane.nxv4i32"
+        )]
+        fn _svmlslt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smlslt.lane.nxv2i64"
+        )]
+        fn _svmlslt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_u32<const IMM_INDEX: i32>(
+    op1: svuint32_t,
+    op2: svuint16_t,
+    op3: svuint16_t,
+) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslt.lane.nxv4i32"
+        )]
+        fn _svmlslt_lane_u32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            IMM_INDEX: i32,
+        ) -> svint32_t;
+    }
+    unsafe {
+        _svmlslt_lane_u32(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))]
+pub fn svmlslt_lane_u64<const IMM_INDEX: i32>(
+    op1: svuint64_t,
+    op2: svuint32_t,
+    op3: svuint32_t,
+) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umlslt.lane.nxv2i64"
+        )]
+        fn _svmlslt_lane_u64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            IMM_INDEX: i32,
+        ) -> svint64_t;
+    }
+    unsafe {
+        _svmlslt_lane_u64(op1.as_signed(), op2.as_signed(), op3.as_signed(), IMM_INDEX)
+            .as_unsigned()
+    }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv8i16")]
+        fn _svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlslt_s16(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svmlslt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv4i32")]
+        fn _svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslt_s32(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svmlslt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smlslt.nxv2i64")]
+        fn _svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlslt_s64(op1, op2, op3) }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smlslt))]
+pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svmlslt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv8i16")]
+        fn _svmlslt_u16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmlslt_u16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t {
+    svmlslt_u16(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv4i32")]
+        fn _svmlslt_u32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmlslt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t {
+    svmlslt_u32(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umlslt.nxv2i64")]
+        fn _svmlslt_u64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmlslt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umlslt))]
+pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t {
+    svmlslt_u64(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb))]
+pub fn svmovlb_s16(op: svint8_t) -> svint16_t {
+    // A move long is a shift-left long by zero, hence the sshllb instruction check above.
+    svshllb_n_s16::<0>(op)
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb))]
+pub fn svmovlb_s32(op: svint16_t) -> svint32_t {
+    svshllb_n_s32::<0>(op)
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb))]
+pub fn svmovlb_s64(op: svint32_t) -> svint64_t {
+    svshllb_n_s64::<0>(op)
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb))]
+pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t {
+    svshllb_n_u16::<0>(op)
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb))]
+pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t {
+    svshllb_n_u32::<0>(op)
+}
+#[doc = "Move long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb))]
+pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t {
+    svshllb_n_u64::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt))]
+pub fn svmovlt_s16(op: svint8_t) -> svint16_t {
+    svshllt_n_s16::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt))]
+pub fn svmovlt_s32(op: svint16_t) -> svint32_t {
+    svshllt_n_s32::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt))]
+pub fn svmovlt_s64(op: svint32_t) -> svint64_t {
+    svshllt_n_s64::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt))]
+pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t {
+    svshllt_n_u16::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt))]
+pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t {
+    svshllt_n_u32::<0>(op)
+}
+#[doc = "Move long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt))]
+pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t {
+    svshllt_n_u64::<0>(op)
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))]
+pub fn svmul_lane_f32<const IMM_INDEX: i32>(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmul.lane.nxv4f32"
+        )]
+        fn _svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t, imm_index: i32) -> svfloat32_t;
+    }
+    unsafe { _svmul_lane_f32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))]
+pub fn svmul_lane_f64<const IMM_INDEX: i32>(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.fmul.lane.nxv2f64"
+        )]
+        fn _svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t, imm_index: i32) -> svfloat64_t;
+    }
+    unsafe { _svmul_lane_f64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mul.lane.nxv8i16"
+        )]
+        fn _svmul_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svmul_lane_s16(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mul.lane.nxv4i32"
+        )]
+        fn _svmul_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svmul_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.mul.lane.nxv2i64"
+        )]
+        fn _svmul_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svmul_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_u16<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    unsafe { svmul_lane_s16::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_u32<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    unsafe { svmul_lane_s32::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))]
+pub fn svmul_lane_u64<const IMM_INDEX: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    unsafe { svmul_lane_s64::<IMM_INDEX>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))]
+pub fn svmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smullb.lane.nxv4i32"
+        )]
+        fn _svmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svmullb_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb, IMM_INDEX = 0))]
+pub fn svmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smullb.lane.nxv2i64"
+        )]
+        fn _svmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svmullb_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))]
+pub fn svmullb_lane_u32<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umullb.lane.nxv4i32"
+        )]
+        fn _svmullb_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svmullb_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb, IMM_INDEX = 0))]
+pub fn svmullb_lane_u64<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umullb.lane.nxv2i64"
+        )]
+        fn _svmullb_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svmullb_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv8i16")]
+        fn _svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmullb_s16(op1, op2) }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svmullb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv4i32")]
+        fn _svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmullb_s32(op1, op2) }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svmullb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullb.nxv2i64")]
+        fn _svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmullb_s64(op1, op2) }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullb))]
+pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svmullb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv8i16")]
+        fn _svmullb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmullb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svmullb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv4i32")]
+        fn _svmullb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmullb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svmullb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullb.nxv2i64")]
+        fn _svmullb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmullb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullb))]
+pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svmullb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))]
+pub fn svmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smullt.lane.nxv4i32"
+        )]
+        fn _svmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svmullt_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt, IMM_INDEX = 0))]
+pub fn svmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.smullt.lane.nxv2i64"
+        )]
+        fn _svmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svmullt_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))]
+pub fn svmullt_lane_u32<const IMM_INDEX: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umullt.lane.nxv4i32"
+        )]
+        fn _svmullt_lane_u32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svmullt_lane_u32(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt, IMM_INDEX = 0))]
+pub fn svmullt_lane_u64<const IMM_INDEX: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.umullt.lane.nxv2i64"
+        )]
+        fn _svmullt_lane_u64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svmullt_lane_u64(op1.as_signed(), op2.as_signed(), IMM_INDEX).as_unsigned() }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv8i16")]
+        fn _svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmullt_s16(op1, op2) }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svmullt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv4i32")]
+        fn _svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmullt_s32(op1, op2) }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svmullt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.smullt.nxv2i64")]
+        fn _svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmullt_s64(op1, op2) }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(smullt))]
+pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svmullt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv8i16")]
+        fn _svmullt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svmullt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svmullt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv4i32")]
+        fn _svmullt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svmullt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svmullt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.umullt.nxv2i64")]
+        fn _svmullt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svmullt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(umullt))]
+pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svmullt_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv16i8")]
+        fn _svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svnbsl_s8(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svnbsl_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv8i16")]
+        fn _svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svnbsl_s16(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svnbsl_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv4i32")]
+        fn _svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svnbsl_s32(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svnbsl_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nbsl.nxv2i64")]
+        fn _svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svnbsl_s64(op1, op2, op3) }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svnbsl_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t {
+    unsafe { svnbsl_s8(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t {
+    svnbsl_u8(op1, op2, svdup_n_u8(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_t {
+    unsafe { svnbsl_s16(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t {
+    svnbsl_u16(op1, op2, svdup_n_u16(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    unsafe { svnbsl_s32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svnbsl_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    unsafe { svnbsl_s64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise select"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nbsl))]
+pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svnbsl_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Detect no matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nmatch))]
+pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv16i8")]
+        fn _svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t;
+    }
+    unsafe { _svnmatch_s8(pg, op1, op2) }
+}
+#[doc = "Detect no matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nmatch))]
+pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.nmatch.nxv8i16")]
+        fn _svnmatch_s16(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svbool8_t;
+    }
+    unsafe { _svnmatch_s16(pg.into(), op1, op2).into() }
+}
+#[doc = "Detect no matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nmatch))]
+pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t {
+    unsafe { svnmatch_s8(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Detect no matching elements"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(nmatch))]
+pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t {
+    unsafe { svnmatch_s16(pg, op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "Polynomial multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(pmul))]
+pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.pmul.nxv16i8")]
+        fn _svpmul_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svpmul_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(pmul))]
+pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svpmul_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullb.pair.nxv16i8"
+        )]
+        fn _svpmullb_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svpmullb_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svpmullb_pair_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullb.pair.nxv4i32"
+        )]
+        fn _svpmullb_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svpmullb_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svpmullb_pair_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullb.pair.nxv2i64"
+        )]
+        fn _svpmullb_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svpmullb_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svpmullb_pair_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    unsafe { simd_reinterpret(svpmullb_pair_u8(op1, op2)) }
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svpmullb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    unsafe { simd_reinterpret(svpmullb_pair_u32(op1, op2)) }
+}
+#[doc = "Polynomial multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullb))]
+pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svpmullb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullt.pair.nxv16i8"
+        )]
+        fn _svpmullt_pair_u8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svpmullt_pair_u8(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t {
+    svpmullt_pair_u8(op1, svdup_n_u8(op2))
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullt.pair.nxv4i32"
+        )]
+        fn _svpmullt_pair_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svpmullt_pair_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t {
+    svpmullt_pair_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.pmullt.pair.nxv2i64"
+        )]
+        fn _svpmullt_pair_u64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svpmullt_pair_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t {
+    svpmullt_pair_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    unsafe { simd_reinterpret(svpmullt_pair_u8(op1, op2)) }
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svpmullt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    unsafe { simd_reinterpret(svpmullt_pair_u32(op1, op2)) }
+}
+#[doc = "Polynomial multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-aes")]
+#[cfg_attr(test, assert_instr(pmullt))]
+pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svpmullt_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv16i8")]
+        fn _svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqabs_s8_m(inactive, pg, op) }
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svqabs_s8_m(op, pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svqabs_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv8i16")]
+        fn _svqabs_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqabs_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svqabs_s16_m(op, pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svqabs_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv4i32")]
+        fn _svqabs_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqabs_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svqabs_s32_m(op, pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svqabs_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqabs.nxv2i64")]
+        fn _svqabs_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqabs_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqabs_s64_m(op, pg, op)
+}
+#[doc = "Saturating absolute value"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqabs))]
+pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqabs_s64_m(svdup_n_s64(0), pg, op)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv16i8")]
+        fn _svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqadd_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqadd_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqadd_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqadd_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqadd_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv8i16")]
+        fn _svqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqadd_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqadd_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqadd_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqadd_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqadd_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv4i32")]
+        fn _svqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqadd_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqadd_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqadd_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqadd_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqadd_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqadd.nxv2i64")]
+        fn _svqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqadd_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqadd_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqadd_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqadd_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqadd))]
+pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqadd_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv16i8")]
+        fn _svqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqadd_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqadd_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqadd_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqadd_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv8i16")]
+        fn _svqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqadd_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqadd_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqadd_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqadd_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv4i32")]
+        fn _svqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqadd_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqadd_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqadd_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqadd_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqadd.nxv2i64")]
+        fn _svqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqadd_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqadd_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqadd_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqadd))]
+pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqadd_u64_z(pg, op1, svdup_n_u64(op2))
+}
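+// The predicated forms above follow the ACLE suffix convention: `_m` merges
+// (inactive lanes keep `op1`), `_x` leaves inactive lanes unspecified (here it
+// simply forwards to `_m`), and `_z` zeroes inactive lanes via `svsel` before
+// the operation. A minimal usage sketch, assuming an SVE2-capable target, that
+// `a` and `b` are `svuint64_t` values in scope, and that a predicate
+// constructor such as `svptrue_b64` is available (it is not defined in this hunk):
+//
+//     let pg = svptrue_b64();
+//     let merged = svqadd_u64_m(pg, a, b); // inactive lanes keep `a`
+//     let zeroed = svqadd_u64_z(pg, a, b); // inactive lanes become zero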
+#[doc = "Saturating complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
+pub fn svqcadd_s8<const IMM_ROTATION: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqcadd.x.nxv16i8"
+        )]
+        fn _svqcadd_s8(op1: svint8_t, op2: svint8_t, imm_rotation: i32) -> svint8_t;
+    }
+    unsafe { _svqcadd_s8(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Saturating complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
+pub fn svqcadd_s16<const IMM_ROTATION: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqcadd.x.nxv8i16"
+        )]
+        fn _svqcadd_s16(op1: svint16_t, op2: svint16_t, imm_rotation: i32) -> svint16_t;
+    }
+    unsafe { _svqcadd_s16(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Saturating complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
+pub fn svqcadd_s32<const IMM_ROTATION: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqcadd.x.nxv4i32"
+        )]
+        fn _svqcadd_s32(op1: svint32_t, op2: svint32_t, imm_rotation: i32) -> svint32_t;
+    }
+    unsafe { _svqcadd_s32(op1, op2, IMM_ROTATION) }
+}
+#[doc = "Saturating complex add with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))]
+pub fn svqcadd_s64<const IMM_ROTATION: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert!(IMM_ROTATION == 90 || IMM_ROTATION == 270);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqcadd.x.nxv2i64"
+        )]
+        fn _svqcadd_s64(op1: svint64_t, op2: svint64_t, imm_rotation: i32) -> svint64_t;
+    }
+    unsafe { _svqcadd_s64(op1, op2, IMM_ROTATION) }
+}
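+// The rotation for `svqcadd_*` is a const generic rather than a runtime
+// argument, so callers select it with a turbofish; only 90 and 270 pass the
+// `static_assert!` above. A minimal sketch, assuming `a` and `b` are
+// `svint8_t` values already in scope:
+//
+//     let r90 = svqcadd_s8::<90>(a, b);   // complex add, 90-degree rotate
+//     let r270 = svqcadd_s8::<270>(a, b); // complex add, 270-degree rotate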
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))]
+pub fn svqdmlalb_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv4i32"
+        )]
+        fn _svqdmlalb_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqdmlalb_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))]
+pub fn svqdmlalb_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalb.lane.nxv2i64"
+        )]
+        fn _svqdmlalb_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqdmlalb_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
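+// For the `_lane` variants the index is also a const generic: it selects an
+// element within each 128-bit segment of `op3`, which is why the valid range
+// is 0..=7 for 16-bit multiplicands and 0..=3 for 32-bit ones. A minimal
+// sketch, assuming `acc: svint32_t` and `a`, `b: svint16_t` are in scope:
+//
+//     // multiply even (bottom) 16-bit elements of `a` by lane 2 of each
+//     // 128-bit segment of `b`, double, saturate and accumulate:
+//     let r = svqdmlalb_lane_s32::<2>(acc, a, b);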
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalb.nxv8i16"
+        )]
+        fn _svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlalb_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlalb_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalb.nxv4i32"
+        )]
+        fn _svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlalb_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlalb_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalb.nxv2i64"
+        )]
+        fn _svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlalb_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalb))]
+pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlalb_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalbt.nxv8i16"
+        )]
+        fn _svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlalbt_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlalbt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalbt.nxv4i32"
+        )]
+        fn _svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlalbt_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlalbt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalbt.nxv2i64"
+        )]
+        fn _svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlalbt_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalbt))]
+pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlalbt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))]
+pub fn svqdmlalt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv4i32"
+        )]
+        fn _svqdmlalt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqdmlalt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))]
+pub fn svqdmlalt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalt.lane.nxv2i64"
+        )]
+        fn _svqdmlalt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqdmlalt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalt.nxv8i16"
+        )]
+        fn _svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlalt_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlalt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalt.nxv4i32"
+        )]
+        fn _svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlalt_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlalt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlalt.nxv2i64"
+        )]
+        fn _svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlalt_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-add long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlalt))]
+pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlalt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))]
+pub fn svqdmlslb_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv4i32"
+        )]
+        fn _svqdmlslb_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqdmlslb_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))]
+pub fn svqdmlslb_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslb.lane.nxv2i64"
+        )]
+        fn _svqdmlslb_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqdmlslb_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslb.nxv8i16"
+        )]
+        fn _svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlslb_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlslb_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslb.nxv4i32"
+        )]
+        fn _svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlslb_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlslb_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslb.nxv2i64"
+        )]
+        fn _svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlslb_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslb))]
+pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlslb_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslbt.nxv8i16"
+        )]
+        fn _svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlslbt_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlslbt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslbt.nxv4i32"
+        )]
+        fn _svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlslbt_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlslbt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslbt.nxv2i64"
+        )]
+        fn _svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlslbt_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (bottom × top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslbt))]
+pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlslbt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
+pub fn svqdmlslt_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv4i32"
+        )]
+        fn _svqdmlslt_lane_s32(
+            op1: svint32_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqdmlslt_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))]
+pub fn svqdmlslt_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslt.lane.nxv2i64"
+        )]
+        fn _svqdmlslt_lane_s64(
+            op1: svint64_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqdmlslt_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslt.nxv8i16"
+        )]
+        fn _svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmlslt_s16(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t {
+    svqdmlslt_s16(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslt.nxv4i32"
+        )]
+        fn _svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmlslt_s32(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t {
+    svqdmlslt_s32(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmlslt.nxv2i64"
+        )]
+        fn _svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmlslt_s64(op1, op2, op3) }
+}
+#[doc = "Saturating doubling multiply-subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmlslt))]
+pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t {
+    svqdmlslt_s64(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
+pub fn svqdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv8i16"
+        )]
+        fn _svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svqdmulh_lane_s16(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
+pub fn svqdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv4i32"
+        )]
+        fn _svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svqdmulh_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))]
+pub fn svqdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.lane.nxv2i64"
+        )]
+        fn _svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svqdmulh_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.nxv16i8"
+        )]
+        fn _svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqdmulh_s8(op1, op2) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
+    svqdmulh_s8(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.nxv8i16"
+        )]
+        fn _svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqdmulh_s16(op1, op2) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
+    svqdmulh_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.nxv4i32"
+        )]
+        fn _svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqdmulh_s32(op1, op2) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
+    svqdmulh_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmulh.nxv2i64"
+        )]
+        fn _svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqdmulh_s64(op1, op2) }
+}
+#[doc = "Saturating doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmulh))]
+pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
+    svqdmulh_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
+pub fn svqdmullb_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv4i32"
+        )]
+        fn _svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svqdmullb_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))]
+pub fn svqdmullb_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullb.lane.nxv2i64"
+        )]
+        fn _svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svqdmullb_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullb.nxv8i16"
+        )]
+        fn _svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmullb_s16(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svqdmullb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullb.nxv4i32"
+        )]
+        fn _svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmullb_s32(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svqdmullb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullb.nxv2i64"
+        )]
+        fn _svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmullb_s64(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullb))]
+pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svqdmullb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
+pub fn svqdmullt_lane_s32<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv4i32"
+        )]
+        fn _svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svqdmullt_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))]
+pub fn svqdmullt_lane_s64<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullt.lane.nxv2i64"
+        )]
+        fn _svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svqdmullt_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullt.nxv8i16"
+        )]
+        fn _svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svqdmullt_s16(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svqdmullt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullt.nxv4i32"
+        )]
+        fn _svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svqdmullt_s32(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svqdmullt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqdmullt.nxv2i64"
+        )]
+        fn _svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svqdmullt_s64(op1, op2) }
+}
+#[doc = "Saturating doubling multiply long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqdmullt))]
+pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svqdmullt_s64(op1, svdup_n_s32(op2))
+}
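+// The `svqdmull*` intrinsics are widening: they read 8/16/32-bit inputs and
+// produce results twice as wide, with `b` (bottom) using the even-numbered
+// source elements and `t` (top) the odd-numbered ones. A minimal sketch,
+// assuming `a` and `b` are `svint16_t` values in scope:
+//
+//     let lo = svqdmullb_s32(a, b); // even lanes, widened to 32 bits
+//     let hi = svqdmullt_s32(a, b); // odd lanes, widened to 32 bits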
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv16i8")]
+        fn _svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqneg_s8_m(inactive, pg, op) }
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svqneg_s8_m(op, pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t {
+    svqneg_s8_m(svdup_n_s8(0), pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv8i16")]
+        fn _svqneg_s16_m(inactive: svint16_t, pg: svbool8_t, op: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqneg_s16_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svqneg_s16_m(op, pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t {
+    svqneg_s16_m(svdup_n_s16(0), pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv4i32")]
+        fn _svqneg_s32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqneg_s32_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svqneg_s32_m(op, pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t {
+    svqneg_s32_m(svdup_n_s32(0), pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqneg.nxv2i64")]
+        fn _svqneg_s64_m(inactive: svint64_t, pg: svbool2_t, op: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqneg_s64_m(inactive, pg.into(), op) }
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqneg_s64_m(op, pg, op)
+}
+#[doc = "Saturating negate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqneg))]
+pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t {
+    svqneg_s64_m(svdup_n_s64(0), pg, op)
+}
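+// Illustrative usage sketch (not part of the generated bindings): the `_m`, `_x` and
+// `_z` suffixes only differ in how lanes where `pg` is false are handled. Assuming an
+// all-true predicate helper such as `svptrue_b8()` from the base SVE bindings, and
+// illustrative locals `fallback` and `x` of type `svint8_t`:
+//
+//     let pg = svptrue_b8();
+//     let merged = svqneg_s8_m(fallback, pg, x); // inactive lanes copied from `fallback`
+//     let anyval = svqneg_s8_x(pg, x);           // inactive lanes have unspecified values
+//     let zeroed = svqneg_s8_z(pg, x);           // inactive lanes are zero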
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_lane_s16<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv8i16"
+        )]
+        fn _svqrdcmlah_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdcmlah_lane_s16(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_lane_s32<const IMM_INDEX: i32, const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.lane.x.nxv4i32"
+        )]
+        fn _svqrdcmlah_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdcmlah_lane_s32(op1, op2, op3, IMM_INDEX, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s8<const IMM_ROTATION: i32>(
+    op1: svint8_t,
+    op2: svint8_t,
+    op3: svint8_t,
+) -> svint8_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv16i8"
+        )]
+        fn _svqrdcmlah_s8(
+            op1: svint8_t,
+            op2: svint8_t,
+            op3: svint8_t,
+            imm_rotation: i32,
+        ) -> svint8_t;
+    }
+    unsafe { _svqrdcmlah_s8(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s16<const IMM_ROTATION: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv8i16"
+        )]
+        fn _svqrdcmlah_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_rotation: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdcmlah_s16(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s32<const IMM_ROTATION: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv4i32"
+        )]
+        fn _svqrdcmlah_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_rotation: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdcmlah_s32(op1, op2, op3, IMM_ROTATION) }
+}
+#[doc = "Saturating rounding doubling complex multiply-add high with rotate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))]
+pub fn svqrdcmlah_s64<const IMM_ROTATION: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert!(
+        IMM_ROTATION == 0 || IMM_ROTATION == 90 || IMM_ROTATION == 180 || IMM_ROTATION == 270
+    );
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdcmlah.x.nxv2i64"
+        )]
+        fn _svqrdcmlah_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            imm_rotation: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqrdcmlah_s64(op1, op2, op3, IMM_ROTATION) }
+}
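+// Illustrative usage sketch: the rotation (in degrees) and, for the lane forms, the lane
+// index are passed as const generics, e.g. with illustrative `svint16_t` locals `acc`,
+// `a` and `b`:
+//
+//     let r = svqrdcmlah_s16::<90>(acc, a, b);          // rotate by 90 degrees
+//     let r = svqrdcmlah_lane_s16::<1, 270>(acc, a, b); // lane index 1, rotate by 270 degrees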
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv8i16"
+        )]
+        fn _svqrdmlah_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdmlah_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv4i32"
+        )]
+        fn _svqrdmlah_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdmlah_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))]
+pub fn svqrdmlah_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.lane.nxv2i64"
+        )]
+        fn _svqrdmlah_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqrdmlah_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv16i8"
+        )]
+        fn _svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrdmlah_s8(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svqrdmlah_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv8i16"
+        )]
+        fn _svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrdmlah_s16(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svqrdmlah_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv4i32"
+        )]
+        fn _svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrdmlah_s32(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svqrdmlah_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlah.nxv2i64"
+        )]
+        fn _svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrdmlah_s64(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-add high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlah))]
+pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svqrdmlah_s64(op1, op2, svdup_n_s64(op3))
+}
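+// The `_n_` forms are thin wrappers that broadcast the scalar operand with the matching
+// `svdup_n_*` call before dispatching to the vector form, so
+//
+//     svqrdmlah_n_s16(acc, a, 3)
+//
+// is equivalent to
+//
+//     svqrdmlah_s16(acc, a, svdup_n_s16(3))
+//
+// (with `acc` and `a` illustrative `svint16_t` values).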
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
+pub fn svqrdmlsh_lane_s16<const IMM_INDEX: i32>(
+    op1: svint16_t,
+    op2: svint16_t,
+    op3: svint16_t,
+) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv8i16"
+        )]
+        fn _svqrdmlsh_lane_s16(
+            op1: svint16_t,
+            op2: svint16_t,
+            op3: svint16_t,
+            imm_index: i32,
+        ) -> svint16_t;
+    }
+    unsafe { _svqrdmlsh_lane_s16(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
+pub fn svqrdmlsh_lane_s32<const IMM_INDEX: i32>(
+    op1: svint32_t,
+    op2: svint32_t,
+    op3: svint32_t,
+) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv4i32"
+        )]
+        fn _svqrdmlsh_lane_s32(
+            op1: svint32_t,
+            op2: svint32_t,
+            op3: svint32_t,
+            imm_index: i32,
+        ) -> svint32_t;
+    }
+    unsafe { _svqrdmlsh_lane_s32(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))]
+pub fn svqrdmlsh_lane_s64<const IMM_INDEX: i32>(
+    op1: svint64_t,
+    op2: svint64_t,
+    op3: svint64_t,
+) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.lane.nxv2i64"
+        )]
+        fn _svqrdmlsh_lane_s64(
+            op1: svint64_t,
+            op2: svint64_t,
+            op3: svint64_t,
+            imm_index: i32,
+        ) -> svint64_t;
+    }
+    unsafe { _svqrdmlsh_lane_s64(op1, op2, op3, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv16i8"
+        )]
+        fn _svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrdmlsh_s8(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t {
+    svqrdmlsh_s8(op1, op2, svdup_n_s8(op3))
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv8i16"
+        )]
+        fn _svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrdmlsh_s16(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t {
+    svqrdmlsh_s16(op1, op2, svdup_n_s16(op3))
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv4i32"
+        )]
+        fn _svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrdmlsh_s32(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t {
+    svqrdmlsh_s32(op1, op2, svdup_n_s32(op3))
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmlsh.nxv2i64"
+        )]
+        fn _svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrdmlsh_s64(op1, op2, op3) }
+}
+#[doc = "Saturating rounding doubling multiply-subtract high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmlsh))]
+pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t {
+    svqrdmlsh_s64(op1, op2, svdup_n_s64(op3))
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s16<const IMM_INDEX: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM_INDEX, 0, 7);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv8i16"
+        )]
+        fn _svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t, imm_index: i32) -> svint16_t;
+    }
+    unsafe { _svqrdmulh_lane_s16(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s32<const IMM_INDEX: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM_INDEX, 0, 3);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv4i32"
+        )]
+        fn _svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t, imm_index: i32) -> svint32_t;
+    }
+    unsafe { _svqrdmulh_lane_s32(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))]
+pub fn svqrdmulh_lane_s64<const IMM_INDEX: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM_INDEX, 0, 1);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.lane.nxv2i64"
+        )]
+        fn _svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t, imm_index: i32) -> svint64_t;
+    }
+    unsafe { _svqrdmulh_lane_s64(op1, op2, IMM_INDEX) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.nxv16i8"
+        )]
+        fn _svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrdmulh_s8(op1, op2) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t {
+    svqrdmulh_s8(op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.nxv8i16"
+        )]
+        fn _svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrdmulh_s16(op1, op2) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t {
+    svqrdmulh_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.nxv4i32"
+        )]
+        fn _svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrdmulh_s32(op1, op2) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t {
+    svqrdmulh_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrdmulh.nxv2i64"
+        )]
+        fn _svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrdmulh_s64(op1, op2) }
+}
+#[doc = "Saturating rounding doubling multiply high"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrdmulh))]
+pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t {
+    svqrdmulh_s64(op1, svdup_n_s64(op2))
+}
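+// Worked example of the "doubling multiply high" arithmetic: per the Arm semantics each
+// lane computes saturate((2 * op1 * op2 + (1 << (bits - 1))) >> bits). For 16-bit lanes,
+//
+//     svqrdmulh_n_s16(svdup_n_s16(16384), 16384)
+//
+// yields 8192 in every lane, i.e. 0.5 * 0.5 = 0.25 when the inputs are read as Q15
+// fixed-point values.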
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv16i8")]
+        fn _svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrshl_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqrshl_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqrshl_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqrshl_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqrshl_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv8i16")]
+        fn _svqrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrshl_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqrshl_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqrshl_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqrshl_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqrshl_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv4i32")]
+        fn _svqrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrshl_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqrshl_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqrshl_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqrshl_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqrshl_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqrshl.nxv2i64")]
+        fn _svqrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrshl_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqrshl_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqrshl_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqrshl_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshl))]
+pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqrshl_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv16i8")]
+        fn _svqrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqrshl_u8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqrshl_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqrshl_u8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqrshl_u8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv8i16")]
+        fn _svqrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqrshl_u16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqrshl_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqrshl_u16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqrshl_u16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv4i32")]
+        fn _svqrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqrshl_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqrshl_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqrshl_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqrshl_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqrshl.nxv2i64")]
+        fn _svqrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqrshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqrshl_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqrshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshl))]
+pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqrshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
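+// Usage note: both the signed and unsigned saturating rounding shifts take a *signed*
+// per-lane shift amount; positive values shift left (saturating) and negative values
+// perform a rounding shift right. For example, with an illustrative predicate `pg` and
+// `svuint16_t` value `x` already in scope:
+//
+//     let halved = svqrshl_n_u16_x(pg, x, -1); // rounding halve of each active lane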
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv8i16"
+        )]
+        fn _svqrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv4i32"
+        )]
+        fn _svqrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnb.nxv2i64"
+        )]
+        fn _svqrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv8i16"
+        )]
+        fn _svqrshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv4i32"
+        )]
+        fn _svqrshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))]
+pub fn svqrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnb.nxv2i64"
+        )]
+        fn _svqrshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnt.nxv8i16"
+        )]
+        fn _svqrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnt.nxv4i32"
+        )]
+        fn _svqrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrnt.nxv2i64"
+        )]
+        fn _svqrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnt.nxv8i16"
+        )]
+        fn _svqrshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnt.nxv4i32"
+        )]
+        fn _svqrshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))]
+pub fn svqrshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqrshrnt.nxv2i64"
+        )]
+        fn _svqrshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
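+// NOTE: illustrative sketch, not part of the generated bindings. The `_nt`
+// (top) forms above are normally paired with the corresponding `_nb` (bottom)
+// forms defined earlier in this file: the bottom form fills the even-numbered
+// lanes of the narrowed vector and the top form fills the odd-numbered lanes
+// from a second wide vector, e.g. (assuming an SVE2-capable target):
+//
+//     #[target_feature(enable = "sve,sve2")]
+//     fn narrow_s16_to_s8(lo: svint16_t, hi: svint16_t) -> svint8_t {
+//         let even = svqrshrnb_n_s16::<4>(lo); // rounding shift right by 4, even lanes
+//         svqrshrnt_n_s16::<4>(even, hi)       // same shift, odd lanes; even lanes kept
+//     }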
+#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
+pub fn svqrshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunb.nxv8i16"
+        )]
+        fn _svqrshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrunb_n_s16(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
+pub fn svqrshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunb.nxv4i32"
+        )]
+        fn _svqrshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrunb_n_s32(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))]
+pub fn svqrshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunb.nxv2i64"
+        )]
+        fn _svqrshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrunb_n_s64(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
+pub fn svqrshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunt.nxv8i16"
+        )]
+        fn _svqrshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqrshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
+pub fn svqrshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunt.nxv4i32"
+        )]
+        fn _svqrshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqrshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating rounding shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))]
+pub fn svqrshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqrshrunt.nxv2i64"
+        )]
+        fn _svqrshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqrshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
+}
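+// NOTE: illustrative sketch, not part of the generated bindings. The
+// `svqrshrun{b,t}` forms take signed input and saturate to an unsigned result,
+// so negative lanes clamp to 0. They pair the same way as the signed variants:
+//
+//     #[target_feature(enable = "sve,sve2")]
+//     fn narrow_s16_to_u8(lo: svint16_t, hi: svint16_t) -> svuint8_t {
+//         let even = svqrshrunb_n_s16::<4>(lo);
+//         svqrshrunt_n_s16::<4>(even, hi)
+//     }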
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv16i8")]
+        fn _svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqshl_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqshl_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqshl_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqshl_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqshl_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv8i16")]
+        fn _svqshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqshl_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqshl_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqshl_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqshl_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqshl_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv4i32")]
+        fn _svqshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqshl_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqshl_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqshl_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqshl_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqshl_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshl.nxv2i64")]
+        fn _svqshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqshl_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqshl_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqshl_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqshl_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshl))]
+pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqshl_s64_z(pg, op1, svdup_n_s64(op2))
+}
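+// NOTE: illustrative sketch, not part of the generated bindings. The `_m`,
+// `_x` and `_z` suffixes above follow the usual ACLE predication forms:
+// `_m` keeps `op1`'s value in inactive lanes, `_z` zeroes inactive lanes
+// (implemented here by zeroing `op1` first), and `_x` leaves inactive lanes
+// unspecified (implemented here as `_m`). For example:
+//
+//     let merged = svqshl_n_s8_m(pg, x, 2); // inactive lanes keep x
+//     let zeroed = svqshl_n_s8_z(pg, x, 2); // inactive lanes become 0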
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv16i8")]
+        fn _svqshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqshl_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svqshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svqshl_u8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv8i16")]
+        fn _svqshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqshl_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svqshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svqshl_u16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv4i32")]
+        fn _svqshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqshl_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svqshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svqshl_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqshl.nxv2i64")]
+        fn _svqshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svqshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshl))]
+pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svqshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
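+// NOTE: illustrative sketch, not part of the generated bindings. Even for the
+// unsigned element types, the shift operand of `svqshl` is signed; a negative
+// amount shifts right instead of left, e.g.:
+//
+//     let left  = svqshl_n_u8_x(pg, v, 3);  // active lanes shifted left by 3, saturating
+//     let right = svqshl_n_u8_x(pg, v, -3); // negative amount: shift right by 3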
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv16i8")]
+        fn _svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshlu_n_s8_m(pg, op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svuint8_t {
+    svqshlu_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv8i16")]
+        fn _svqshlu_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshlu_n_s16_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svuint16_t {
+    svqshlu_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv4i32")]
+        fn _svqshlu_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshlu_n_s32_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svuint32_t {
+    svqshlu_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 63);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqshlu.nxv2i64")]
+        fn _svqshlu_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svqshlu_n_s64_m(pg.into(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    svqshlu_n_s64_m::<IMM2>(pg, op1)
+}
+#[doc = "Saturating shift left unsigned"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))]
+pub fn svqshlu_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svuint64_t {
+    svqshlu_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
+}
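+// NOTE: illustrative sketch, not part of the generated bindings. `svqshlu`
+// shifts signed input left by an immediate and saturates to the unsigned
+// range, so negative lanes clamp to 0 and overflowing lanes clamp to the
+// element type's maximum, e.g.:
+//
+//     let u = svqshlu_n_s8_m::<3>(pg, x); // i8 lanes shifted left by 3, saturated to u8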
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv8i16"
+        )]
+        fn _svqshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv4i32"
+        )]
+        fn _svqshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnb.nxv2i64"
+        )]
+        fn _svqshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv8i16"
+        )]
+        fn _svqshrnb_n_u16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv4i32"
+        )]
+        fn _svqshrnb_n_u32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))]
+pub fn svqshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnb.nxv2i64"
+        )]
+        fn _svqshrnb_n_u64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv8i16"
+        )]
+        fn _svqshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv4i32"
+        )]
+        fn _svqshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv8i16"
+        )]
+        fn _svqshrnt_n_u16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrnt_n_u16(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv4i32"
+        )]
+        fn _svqshrnt_n_u32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrnt_n_u32(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))]
+pub fn svqshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uqshrnt.nxv2i64"
+        )]
+        fn _svqshrnt_n_u64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrnt_n_u64(even.as_signed(), op1.as_signed(), IMM2).as_unsigned() }
+}
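+// NOTE: illustrative sketch, not part of the generated bindings. `svqshrn{b,t}`
+// are the truncating counterparts of the rounding `svqrshrn{b,t}` forms above;
+// the bottom form writes the even lanes and the top form fills the odd lanes
+// from a second wide vector:
+//
+//     #[target_feature(enable = "sve,sve2")]
+//     fn narrow_u32_to_u16(lo: svuint32_t, hi: svuint32_t) -> svuint16_t {
+//         let even = svqshrnb_n_u32::<8>(lo);
+//         svqshrnt_n_u32::<8>(even, hi)
+//     }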
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s16<const IMM2: i32>(op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv8i16"
+        )]
+        fn _svqshrunb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunb_n_s16(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s32<const IMM2: i32>(op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv4i32"
+        )]
+        fn _svqshrunb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunb_n_s32(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))]
+pub fn svqshrunb_n_s64<const IMM2: i32>(op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunb.nxv2i64"
+        )]
+        fn _svqshrunb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunb_n_s64(op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s16<const IMM2: i32>(even: svuint8_t, op1: svint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv8i16"
+        )]
+        fn _svqshrunt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svqshrunt_n_s16(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s32<const IMM2: i32>(even: svuint16_t, op1: svint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv4i32"
+        )]
+        fn _svqshrunt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svqshrunt_n_s32(even.as_signed(), op1, IMM2).as_unsigned() }
+}
+#[doc = "Saturating shift right unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))]
+pub fn svqshrunt_n_s64<const IMM2: i32>(even: svuint32_t, op1: svint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqshrunt.nxv2i64"
+        )]
+        fn _svqshrunt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svqshrunt_n_s64(even.as_signed(), op1, IMM2).as_unsigned() }
+}
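+// NOTE: illustrative sketch, not part of the generated bindings. As with the
+// rounding variants, `svqshrun{b,t}` narrow signed input to an unsigned result
+// with truncation, clamping negative lanes to 0:
+//
+//     let even = svqshrunb_n_s32::<8>(lo);       // svint32_t -> svuint16_t, even lanes
+//     let both = svqshrunt_n_s32::<8>(even, hi); // odd lanes taken from hi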
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv16i8")]
+        fn _svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsub_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsub_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsub_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv8i16")]
+        fn _svqsub_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsub_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsub_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqsub_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsub_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqsub_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsub_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv4i32")]
+        fn _svqsub_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsub_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsub_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqsub_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsub_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqsub_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsub_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsub.nxv2i64")]
+        fn _svqsub_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsub_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsub_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqsub_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsub_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqsub_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsub))]
+pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsub_s64_z(pg, op1, svdup_n_s64(op2))
+}
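+// NOTE: illustrative sketch, not part of the generated bindings. `svqsub`
+// subtracts with saturation rather than wrapping: for i8 lanes, (-100) - 100
+// clamps to the type minimum of -128 instead of wrapping around to 56, e.g.:
+//
+//     let d = svqsub_n_s8_x(pg, svdup_n_s8(-100), 100); // every active lane is -128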
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv16i8")]
+        fn _svqsub_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsub_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsub_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqsub_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsub_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqsub_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsub_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv8i16")]
+        fn _svqsub_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsub_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsub_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqsub_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsub_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqsub_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsub_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv4i32")]
+        fn _svqsub_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsub_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsub_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqsub_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsub_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqsub_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsub_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsub.nxv2i64")]
+        fn _svqsub_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsub_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsub_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqsub_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsub_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqsub_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating subtract"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsub))]
+pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsub_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv16i8")]
+        fn _svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsubr_s8_m(pg, op1, op2) }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsubr_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsubr_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsubr_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svqsubr_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svqsubr_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv8i16")]
+        fn _svqsubr_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsubr_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsubr_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqsubr_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsubr_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svqsubr_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svqsubr_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv4i32")]
+        fn _svqsubr_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsubr_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsubr_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqsubr_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsubr_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svqsubr_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svqsubr_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqsubr.nxv2i64")]
+        fn _svqsubr_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsubr_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsubr_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqsubr_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsubr_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svqsubr_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqsubr))]
+pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svqsubr_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv16i8")]
+        fn _svqsubr_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svqsubr_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsubr_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqsubr_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsubr_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svqsubr_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svqsubr_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv8i16")]
+        fn _svqsubr_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svqsubr_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsubr_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqsubr_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsubr_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svqsubr_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svqsubr_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv4i32")]
+        fn _svqsubr_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svqsubr_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsubr_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqsubr_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsubr_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svqsubr_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svqsubr_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqsubr.nxv2i64")]
+        fn _svqsubr_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svqsubr_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsubr_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqsubr_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsubr_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svqsubr_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating subtract reversed"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqsubr))]
+pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svqsubr_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnb))]
+pub fn svqxtnb_s16(op: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv8i16")]
+        fn _svqxtnb_s16(op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtnb_s16(op) }
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnb))]
+pub fn svqxtnb_s32(op: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv4i32")]
+        fn _svqxtnb_s32(op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtnb_s32(op) }
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnb))]
+pub fn svqxtnb_s64(op: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnb.nxv2i64")]
+        fn _svqxtnb_s64(op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtnb_s64(op) }
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnb))]
+pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv8i16")]
+        fn _svqxtnb_u16(op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtnb_u16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnb))]
+pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv4i32")]
+        fn _svqxtnb_u32(op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtnb_u32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating extract narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnb))]
+pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnb.nxv2i64")]
+        fn _svqxtnb_u64(op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtnb_u64(op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnt))]
+pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv8i16")]
+        fn _svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtnt_s16(even, op) }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnt))]
+pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv4i32")]
+        fn _svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtnt_s32(even, op) }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtnt))]
+pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sqxtnt.nxv2i64")]
+        fn _svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtnt_s64(even, op) }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnt))]
+pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv8i16")]
+        fn _svqxtnt_u16(even: svint8_t, op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtnt_u16(even.as_signed(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnt))]
+pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv4i32")]
+        fn _svqxtnt_u32(even: svint16_t, op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtnt_u32(even.as_signed(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating extract narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uqxtnt))]
+pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.uqxtnt.nxv2i64")]
+        fn _svqxtnt_u64(even: svint32_t, op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtnt_u64(even.as_signed(), op.as_signed()).as_unsigned() }
+}
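+// A narrowing pair is normally used together: the bottom (b) form writes the
+// narrowed elements into the even lanes, and the top (t) form fills the odd lanes
+// of its `even` argument. A minimal sketch, assuming `lo` and `hi` are svint16_t
+// values to be packed into one svint8_t vector:
+//     let even = svqxtnb_s16(lo);
+//     let full = svqxtnt_s16(even, hi);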
+#[doc = "Saturating extract unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunb))]
+pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunb.nxv8i16"
+        )]
+        fn _svqxtunb_s16(op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtunb_s16(op).as_unsigned() }
+}
+#[doc = "Saturating extract unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunb))]
+pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunb.nxv4i32"
+        )]
+        fn _svqxtunb_s32(op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtunb_s32(op).as_unsigned() }
+}
+#[doc = "Saturating extract unsigned narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunb))]
+pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunb.nxv2i64"
+        )]
+        fn _svqxtunb_s64(op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtunb_s64(op).as_unsigned() }
+}
+#[doc = "Saturating extract unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunt))]
+pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunt.nxv8i16"
+        )]
+        fn _svqxtunt_s16(even: svint8_t, op: svint16_t) -> svint8_t;
+    }
+    unsafe { _svqxtunt_s16(even.as_signed(), op).as_unsigned() }
+}
+#[doc = "Saturating extract unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunt))]
+pub fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunt.nxv4i32"
+        )]
+        fn _svqxtunt_s32(even: svint16_t, op: svint32_t) -> svint16_t;
+    }
+    unsafe { _svqxtunt_s32(even.as_signed(), op).as_unsigned() }
+}
+#[doc = "Saturating extract unsigned narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sqxtunt))]
+pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sqxtunt.nxv2i64"
+        )]
+        fn _svqxtunt_s64(even: svint32_t, op: svint64_t) -> svint32_t;
+    }
+    unsafe { _svqxtunt_s64(even.as_signed(), op).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnb.nxv8i16"
+        )]
+        fn _svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svraddhnb_s16(op1, op2) }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
+    svraddhnb_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnb.nxv4i32"
+        )]
+        fn _svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svraddhnb_s32(op1, op2) }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
+    svraddhnb_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnb.nxv2i64"
+        )]
+        fn _svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svraddhnb_s64(op1, op2) }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
+    svraddhnb_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svraddhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
+    svraddhnb_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svraddhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
+    svraddhnb_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svraddhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnb))]
+pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
+    svraddhnb_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnt.nxv8i16"
+        )]
+        fn _svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svraddhnt_s16(even, op1, op2) }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
+    svraddhnt_s16(even, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnt.nxv4i32"
+        )]
+        fn _svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svraddhnt_s32(even, op1, op2) }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
+    svraddhnt_s32(even, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.raddhnt.nxv2i64"
+        )]
+        fn _svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svraddhnt_s64(even, op1, op2) }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
+    svraddhnt_s64(even, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svraddhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
+    svraddhnt_u16(even, op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svraddhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
+    svraddhnt_u32(even, op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svraddhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding add narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(raddhnt))]
+pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
+    svraddhnt_u64(even, op1, svdup_n_u64(op2))
+}
+#[doc = "Bitwise rotate left by 1 and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sha3")]
+#[cfg_attr(test, assert_instr(rax1))]
+pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rax1")]
+        fn _svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrax1_s64(op1, op2) }
+}
+#[doc = "Bitwise rotate left by 1 and exclusive OR"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sha3")]
+#[cfg_attr(test, assert_instr(rax1))]
+pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    unsafe { svrax1_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urecpe))]
+pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urecpe.nxv4i32")]
+        fn _svrecpe_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrecpe_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urecpe))]
+pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrecpe_u32_m(op, pg, op)
+}
+#[doc = "Reciprocal estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urecpe))]
+pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrecpe_u32_m(svdup_n_u32(0), pg, op)
+}
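+// svrecpe_u32_x passes `op` as its own `inactive` argument, so inactive lanes keep
+// the input value, while svrecpe_u32_z supplies a zero vector so that inactive
+// lanes come out as zero.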
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv16i8")]
+        fn _svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrhadd_s8_m(pg, op1, op2) }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrhadd_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svrhadd_s8_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrhadd_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svrhadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrhadd_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv8i16")]
+        fn _svrhadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrhadd_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrhadd_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svrhadd_s16_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrhadd_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svrhadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrhadd_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv4i32")]
+        fn _svrhadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrhadd_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrhadd_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svrhadd_s32_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrhadd_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svrhadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrhadd_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srhadd.nxv2i64")]
+        fn _svrhadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrhadd_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrhadd_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svrhadd_s64_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrhadd_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svrhadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srhadd))]
+pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrhadd_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
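+    // The urhadd binding is declared over signed vector types; the unsigned operands are reinterpreted with as_signed() and the result cast back with as_unsigned().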
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv16i8")]
+        fn _svrhadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrhadd_u8_m(pg, op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svrhadd_u8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svrhadd_u8_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svrhadd_u8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    svrhadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t {
+    svrhadd_u8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv8i16")]
+        fn _svrhadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrhadd_u16_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svrhadd_u16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svrhadd_u16_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svrhadd_u16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    svrhadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t {
+    svrhadd_u16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv4i32")]
+        fn _svrhadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrhadd_u32_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svrhadd_u32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svrhadd_u32_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svrhadd_u32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    svrhadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t {
+    svrhadd_u32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urhadd.nxv2i64")]
+        fn _svrhadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrhadd_u64_m(pg.into(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svrhadd_u64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svrhadd_u64_m(pg, op1, op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svrhadd_u64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    svrhadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Rounding halving add"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urhadd))]
+pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t {
+    svrhadd_u64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv16i8")]
+        fn _svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrshl_s8_m(pg, op1, op2) }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrshl_s8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svrshl_s8_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrshl_s8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t {
+    svrshl_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t {
+    svrshl_s8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv8i16")]
+        fn _svrshl_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrshl_s16_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrshl_s16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svrshl_s16_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrshl_s16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t {
+    svrshl_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t {
+    svrshl_s16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv4i32")]
+        fn _svrshl_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrshl_s32_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrshl_s32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svrshl_s32_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrshl_s32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t {
+    svrshl_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t {
+    svrshl_s32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshl.nxv2i64")]
+        fn _svrshl_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrshl_s64_m(pg.into(), op1, op2) }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrshl_s64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svrshl_s64_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrshl_s64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t {
+    svrshl_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshl))]
+pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t {
+    svrshl_s64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
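+    // Only the data operand is reinterpreted; the shift amount is already a signed vector (negative amounts shift right).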
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv16i8")]
+        fn _svrshl_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svrshl_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svrshl_u8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svrshl_u8_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svrshl_u8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svrshl_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svrshl_u8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv8i16")]
+        fn _svrshl_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svrshl_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svrshl_u16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svrshl_u16_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svrshl_u16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svrshl_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svrshl_u16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv4i32")]
+        fn _svrshl_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrshl_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svrshl_u32_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svrshl_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svrshl_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshl.nxv2i64")]
+        fn _svrshl_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svrshl_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svrshl_u64_m(pg, op1, op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svrshl_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Rounding shift left"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshl))]
+pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svrshl_u64_z(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_m<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
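+    // The shift amount is a const generic; static_assert_range enforces the architectural 1..=8 range at compile time.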
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv16i8")]
+        fn _svrshr_n_s8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshr_n_s8_m(pg, op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_x<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s8_z<const IMM2: i32>(pg: svbool_t, op1: svint8_t) -> svint8_t {
+    svrshr_n_s8_m::<IMM2>(pg, svsel_s8(pg, op1, svdup_n_s8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_m<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv8i16")]
+        fn _svrshr_n_s16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_s16_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_x<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s16_z<const IMM2: i32>(pg: svbool_t, op1: svint16_t) -> svint16_t {
+    svrshr_n_s16_m::<IMM2>(pg, svsel_s16(pg, op1, svdup_n_s16(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_m<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv4i32")]
+        fn _svrshr_n_s32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshr_n_s32_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_x<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svrshr_n_s32_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s32_z<const IMM2: i32>(pg: svbool_t, op1: svint32_t) -> svint32_t {
+    svrshr_n_s32_m::<IMM2>(pg, svsel_s32(pg, op1, svdup_n_s32(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_m<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    static_assert_range!(IMM2, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srshr.nxv2i64")]
+        fn _svrshr_n_s64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svrshr_n_s64_m(pg.into(), op1, IMM2) }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_x<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svrshr_n_s64_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srshr, IMM2 = 1))]
+pub fn svrshr_n_s64_z<const IMM2: i32>(pg: svbool_t, op1: svint64_t) -> svint64_t {
+    svrshr_n_s64_m::<IMM2>(pg, svsel_s64(pg, op1, svdup_n_s64(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_m<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv16i8")]
+        fn _svrshr_n_u8_m(pg: svbool_t, op1: svint8_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshr_n_u8_m(pg, op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_x<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u8_z<const IMM2: i32>(pg: svbool_t, op1: svuint8_t) -> svuint8_t {
+    svrshr_n_u8_m::<IMM2>(pg, svsel_u8(pg, op1, svdup_n_u8(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_m<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv8i16")]
+        fn _svrshr_n_u16_m(pg: svbool8_t, op1: svint16_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshr_n_u16_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_x<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u16_z<const IMM2: i32>(pg: svbool_t, op1: svuint16_t) -> svuint16_t {
+    svrshr_n_u16_m::<IMM2>(pg, svsel_u16(pg, op1, svdup_n_u16(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_m<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv4i32")]
+        fn _svrshr_n_u32_m(pg: svbool4_t, op1: svint32_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshr_n_u32_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_x<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u32_z<const IMM2: i32>(pg: svbool_t, op1: svuint32_t) -> svuint32_t {
+    svrshr_n_u32_m::<IMM2>(pg, svsel_u32(pg, op1, svdup_n_u32(0)))
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_m<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM2, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.urshr.nxv2i64")]
+        fn _svrshr_n_u64_m(pg: svbool2_t, op1: svint64_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svrshr_n_u64_m(pg.into(), op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_x<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    svrshr_n_u64_m::<IMM2>(pg, op1)
+}
+#[doc = "Rounding shift right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(urshr, IMM2 = 1))]
+pub fn svrshr_n_u64_z<const IMM2: i32>(pg: svbool_t, op1: svuint64_t) -> svuint64_t {
+    svrshr_n_u64_m::<IMM2>(pg, svsel_u64(pg, op1, svdup_n_u64(0)))
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv8i16")]
+        fn _svrshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv4i32")]
+        fn _svrshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnb.nxv2i64")]
+        fn _svrshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
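+    // The unsigned narrowing form reuses the signed implementation through reinterpret casts; both lower to the same rshrnb instruction.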
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svrshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svrshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))]
+pub fn svrshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svrshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv8i16")]
+        fn _svrshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svrshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv4i32")]
+        fn _svrshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svrshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rshrnt.nxv2i64")]
+        fn _svrshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svrshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svrshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svrshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))]
+pub fn svrshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svrshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Reciprocal square root estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursqrte))]
+pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ursqrte.nxv4i32"
+        )]
+        fn _svrsqrte_u32_m(inactive: svint32_t, pg: svbool4_t, op: svint32_t) -> svint32_t;
+    }
+    unsafe { _svrsqrte_u32_m(inactive.as_signed(), pg.into(), op.as_signed()).as_unsigned() }
+}
+#[doc = "Reciprocal square root estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursqrte))]
+pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t {
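+    // "Don't care" form: `op` doubles as the inactive value passed to the merging implementation.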
+    svrsqrte_u32_m(op, pg, op)
+}
+#[doc = "Reciprocal square root estimate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursqrte))]
+pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t {
+    svrsqrte_u32_m(svdup_n_u32(0), pg, op)
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
+pub fn svrsra_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv16i8")]
+        fn _svrsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svrsra_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
+pub fn svrsra_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv8i16")]
+        fn _svrsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svrsra_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
+pub fn svrsra_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv4i32")]
+        fn _svrsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svrsra_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(srsra, IMM3 = 1))]
+pub fn svrsra_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.srsra.nxv2i64")]
+        fn _svrsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svrsra_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
+pub fn svrsra_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv16i8")]
+        fn _svrsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svrsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
+pub fn svrsra_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv8i16")]
+        fn _svrsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svrsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
+pub fn svrsra_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv4i32")]
+        fn _svrsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svrsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Rounding shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ursra, IMM3 = 1))]
+pub fn svrsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ursra.nxv2i64")]
+        fn _svrsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svrsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnb.nxv8i16"
+        )]
+        fn _svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svrsubhnb_s16(op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
+    svrsubhnb_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnb.nxv4i32"
+        )]
+        fn _svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svrsubhnb_s32(op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
+    svrsubhnb_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnb.nxv2i64"
+        )]
+        fn _svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svrsubhnb_s64(op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
+    svrsubhnb_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svrsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
+    svrsubhnb_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svrsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
+    svrsubhnb_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svrsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnb))]
+pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
+    svrsubhnb_u64(op1, svdup_n_u64(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnt.nxv8i16"
+        )]
+        fn _svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svrsubhnt_s16(even, op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
+    svrsubhnt_s16(even, op1, svdup_n_s16(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnt.nxv4i32"
+        )]
+        fn _svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svrsubhnt_s32(even, op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
+    svrsubhnt_s32(even, op1, svdup_n_s32(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.rsubhnt.nxv2i64"
+        )]
+        fn _svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svrsubhnt_s64(even, op1, op2) }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
+    svrsubhnt_s64(even, op1, svdup_n_s64(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svrsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
+    svrsubhnt_u16(even, op1, svdup_n_u16(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svrsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
+    svrsubhnt_u32(even, op1, svdup_n_u32(op2))
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svrsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Rounding subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(rsubhnt))]
+pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
+    svrsubhnt_u64(even, op1, svdup_n_u64(op2))
+}
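+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): the "rounding subtract narrow high part" family computes
+// (op1 - op2), rounds, and keeps only the high half of each element, with the
+// bottom/top variants interleaving their results as usual.
+#[target_feature(enable = "sve,sve2")]
+fn narrow_difference_example(a: svuint16_t, b: svuint16_t, c: svuint16_t, d: svuint16_t) -> svuint8_t {
+    // Even lanes hold the rounded high byte of (a - b), odd lanes that of (c - d).
+    svrsubhnt_u16(svrsubhnb_u16(a, b), c, d)
+}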
+#[doc = "Subtract with borrow long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv4i32")]
+        fn _svsbclb_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsbclb_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svsbclb_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Subtract with borrow long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclb.nxv2i64")]
+        fn _svsbclb_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsbclb_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclb))]
+pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svsbclb_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv4i32")]
+        fn _svsbclt_u32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsbclt_u32(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t {
+    svsbclt_u32(op1, op2, svdup_n_u32(op3))
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sbclt.nxv2i64")]
+        fn _svsbclt_u64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsbclt_u64(op1.as_signed(), op2.as_signed(), op3.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract with borrow long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sbclt))]
+pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t {
+    svsbclt_u64(op1, op2, svdup_n_u64(op3))
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv8i16")]
+        fn _svshllb_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllb_n_s16(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv4i32")]
+        fn _svshllb_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllb_n_s32(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))]
+pub fn svshllb_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllb.nxv2i64")]
+        fn _svshllb_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllb_n_s64(op1, IMM2) }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv8i16")]
+        fn _svshllb_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllb_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv4i32")]
+        fn _svshllb_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllb_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))]
+pub fn svshllb_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllb.nxv2i64")]
+        fn _svshllb_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllb_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s16<const IMM2: i32>(op1: svint8_t) -> svint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv8i16")]
+        fn _svshllt_n_s16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_s16(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s32<const IMM2: i32>(op1: svint16_t) -> svint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv4i32")]
+        fn _svshllt_n_s32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_s32(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))]
+pub fn svshllt_n_s64<const IMM2: i32>(op1: svint32_t) -> svint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sshllt.nxv2i64")]
+        fn _svshllt_n_s64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_s64(op1, IMM2) }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u16<const IMM2: i32>(op1: svuint8_t) -> svuint16_t {
+    static_assert_range!(IMM2, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv8i16")]
+        fn _svshllt_n_u16(op1: svint8_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshllt_n_u16(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u32<const IMM2: i32>(op1: svuint16_t) -> svuint32_t {
+    static_assert_range!(IMM2, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv4i32")]
+        fn _svshllt_n_u32(op1: svint16_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshllt_n_u32(op1.as_signed(), IMM2).as_unsigned() }
+}
+#[doc = "Shift left long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))]
+pub fn svshllt_n_u64<const IMM2: i32>(op1: svuint32_t) -> svuint64_t {
+    static_assert_range!(IMM2, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ushllt.nxv2i64")]
+        fn _svshllt_n_u64(op1: svint32_t, imm2: i32) -> svint64_t;
+    }
+    unsafe { _svshllt_n_u64(op1.as_signed(), IMM2).as_unsigned() }
+}
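+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): the "bottom"/"top" shift-left-long pair widens the even- and
+// odd-indexed elements respectively, so a shift amount of 0 turns the pair
+// into a plain sign-extending widen of every lane of the input.
+#[target_feature(enable = "sve,sve2")]
+fn widen_all_lanes_example(x: svint8_t) -> (svint16_t, svint16_t) {
+    // Even-indexed bytes land in the first vector, odd-indexed bytes in the
+    // second; IMM2 = 0 keeps the values unshifted.
+    (svshllb_n_s16::<0>(x), svshllt_n_s16::<0>(x))
+}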
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s16<const IMM2: i32>(op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv8i16")]
+        fn _svshrnb_n_s16(op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svshrnb_n_s16(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s32<const IMM2: i32>(op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv4i32")]
+        fn _svshrnb_n_s32(op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshrnb_n_s32(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_s64<const IMM2: i32>(op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnb.nxv2i64")]
+        fn _svshrnb_n_s64(op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshrnb_n_s64(op1, IMM2) }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u16<const IMM2: i32>(op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svshrnb_n_s16::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u32<const IMM2: i32>(op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svshrnb_n_s32::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))]
+pub fn svshrnb_n_u64<const IMM2: i32>(op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svshrnb_n_s64::<IMM2>(op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s16<const IMM2: i32>(even: svint8_t, op1: svint16_t) -> svint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv8i16")]
+        fn _svshrnt_n_s16(even: svint8_t, op1: svint16_t, imm2: i32) -> svint8_t;
+    }
+    unsafe { _svshrnt_n_s16(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s32<const IMM2: i32>(even: svint16_t, op1: svint32_t) -> svint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv4i32")]
+        fn _svshrnt_n_s32(even: svint16_t, op1: svint32_t, imm2: i32) -> svint16_t;
+    }
+    unsafe { _svshrnt_n_s32(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_s64<const IMM2: i32>(even: svint32_t, op1: svint64_t) -> svint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.shrnt.nxv2i64")]
+        fn _svshrnt_n_s64(even: svint32_t, op1: svint64_t, imm2: i32) -> svint32_t;
+    }
+    unsafe { _svshrnt_n_s64(even, op1, IMM2) }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u16<const IMM2: i32>(even: svuint8_t, op1: svuint16_t) -> svuint8_t {
+    static_assert_range!(IMM2, 1, 8);
+    unsafe { svshrnt_n_s16::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u32<const IMM2: i32>(even: svuint16_t, op1: svuint32_t) -> svuint16_t {
+    static_assert_range!(IMM2, 1, 16);
+    unsafe { svshrnt_n_s32::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right narrow (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))]
+pub fn svshrnt_n_u64<const IMM2: i32>(even: svuint32_t, op1: svuint64_t) -> svuint32_t {
+    static_assert_range!(IMM2, 1, 32);
+    unsafe { svshrnt_n_s64::<IMM2>(even.as_signed(), op1.as_signed()).as_unsigned() }
+}
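+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): pairing the "bottom" and "top" narrowing shifts interleaves two
+// narrowed vectors; even lanes come from `a`, odd lanes from `b`, each taken
+// as the (arithmetically shifted) high byte of its 16-bit source element.
+#[target_feature(enable = "sve,sve2")]
+fn interleave_high_bytes_example(a: svint16_t, b: svint16_t) -> svint8_t {
+    svshrnt_n_s16::<8>(svshrnb_n_s16::<8>(a), b)
+}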
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 0, 7);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv16i8")]
+        fn _svsli_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsli_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 0, 15);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv8i16")]
+        fn _svsli_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsli_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 0, 31);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv4i32")]
+        fn _svsli_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsli_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 0, 63);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sli.nxv2i64")]
+        fn _svsli_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsli_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 0, 7);
+    unsafe { svsli_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 0, 15);
+    unsafe { svsli_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 0, 31);
+    unsafe { svsli_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift left and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sli, IMM3 = 0))]
+pub fn svsli_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 0, 63);
+    unsafe { svsli_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
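+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): SLI keeps the IMM3 least-significant bits of `op1` and replaces
+// the remaining bits with `op2 << IMM3`, so it can splice two bit-fields
+// without a separate AND/ORR sequence.
+#[target_feature(enable = "sve,sve2")]
+fn splice_low_byte_example(low8: svuint32_t, rest: svuint32_t) -> svuint32_t {
+    // Per lane: (rest << 8) | (low8 & 0xFF)
+    svsli_n_u32::<8>(low8, rest)
+}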
+#[doc = "SM4 encryption and decryption"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sm4")]
+#[cfg_attr(test, assert_instr(sm4e))]
+pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4e")]
+        fn _svsm4e_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsm4e_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "SM4 key updates"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2,sve2-sm4")]
+#[cfg_attr(test, assert_instr(sm4ekey))]
+pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sm4ekey")]
+        fn _svsm4ekey_u32(op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsm4ekey_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv16i8")]
+        fn _svsqadd_u8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svsqadd_u8_m(pg, op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svsqadd_u8_m(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svsqadd_u8_m(pg, op1, op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svsqadd_u8_x(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t {
+    svsqadd_u8_m(pg, svsel_u8(pg, op1, svdup_n_u8(0)), op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t {
+    svsqadd_u8_z(pg, op1, svdup_n_s8(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv8i16")]
+        fn _svsqadd_u16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svsqadd_u16_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svsqadd_u16_m(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svsqadd_u16_m(pg, op1, op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svsqadd_u16_x(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t {
+    svsqadd_u16_m(pg, svsel_u16(pg, op1, svdup_n_u16(0)), op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t {
+    svsqadd_u16_z(pg, op1, svdup_n_s16(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv4i32")]
+        fn _svsqadd_u32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svsqadd_u32_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svsqadd_u32_m(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svsqadd_u32_m(pg, op1, op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svsqadd_u32_x(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t {
+    svsqadd_u32_m(pg, svsel_u32(pg, op1, svdup_n_u32(0)), op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t {
+    svsqadd_u32_z(pg, op1, svdup_n_s32(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usqadd.nxv2i64")]
+        fn _svsqadd_u64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svsqadd_u64_m(pg.into(), op1.as_signed(), op2).as_unsigned() }
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svsqadd_u64_m(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svsqadd_u64_m(pg, op1, op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svsqadd_u64_x(pg, op1, svdup_n_s64(op2))
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t {
+    svsqadd_u64_m(pg, svsel_u64(pg, op1, svdup_n_u64(0)), op2)
+}
+#[doc = "Saturating add with signed addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usqadd))]
+pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t {
+    svsqadd_u64_z(pg, op1, svdup_n_s64(op2))
+}
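+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): the _m, _x and _z suffixes differ only in what happens to
+// inactive lanes (keep op1, don't care, or zero); active lanes all perform the
+// same saturating unsigned-plus-signed addition.
+#[target_feature(enable = "sve,sve2")]
+fn nudge_active_lanes_example(pg: svbool_t, v: svuint32_t) -> svuint32_t {
+    // Subtract 3 with unsigned saturation where `pg` is true; inactive lanes
+    // keep their original value because this is the merging (_m) form.
+    svsqadd_n_u32_m(pg, v, -3)
+}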
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv16i8")]
+        fn _svsra_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsra_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv8i16")]
+        fn _svsra_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsra_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv4i32")]
+        fn _svsra_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsra_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssra, IMM3 = 1))]
+pub fn svsra_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssra.nxv2i64")]
+        fn _svsra_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsra_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv16i8")]
+        fn _svsra_n_u8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsra_n_u8(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv8i16")]
+        fn _svsra_n_u16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsra_n_u16(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv4i32")]
+        fn _svsra_n_u32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsra_n_u32(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
+#[doc = "Shift right and accumulate"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usra, IMM3 = 1))]
+pub fn svsra_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usra.nxv2i64")]
+        fn _svsra_n_u64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsra_n_u64(op1.as_signed(), op2.as_signed(), IMM3).as_unsigned() }
+}
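+// Illustrative sketch (hypothetical helper, not one of the generated
+// bindings): shift-right-and-accumulate adds the shifted second operand into
+// the first, i.e. op1 + (op2 >> IMM3) per lane, a common fixed-point
+// accumulation pattern.
+#[target_feature(enable = "sve,sve2")]
+fn accumulate_quarter_example(acc: svuint32_t, x: svuint32_t) -> svuint32_t {
+    // acc + (x >> 2) for every lane, using a logical shift for the unsigned form.
+    svsra_n_u32::<2>(acc, x)
+}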
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv16i8")]
+        fn _svsri_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svsri_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv8i16")]
+        fn _svsri_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svsri_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv4i32")]
+        fn _svsri_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svsri_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.sri.nxv2i64")]
+        fn _svsri_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svsri_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe { svsri_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe { svsri_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe { svsri_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Shift right and insert"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sri, IMM3 = 1))]
+pub fn svsri_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe { svsri_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
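+// An illustrative sketch, not part of the generated bindings: unlike `svsra`,
+// `svsri` shifts `op2` right and inserts the result into `op1`, preserving
+// the top IMM3 bits of each `op1` lane, which is why the unsigned forms above
+// can delegate directly to the signed bindings. The helper name and shift
+// amount are purely illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+fn example_svsri_u32(dst: svuint32_t, src: svuint32_t) -> svuint32_t {
+    // Keeps the top 8 bits of each dst lane and fills the rest with src >> 8.
+    unsafe { svsri_n_u32::<8>(dst, src) }
+}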
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64index_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    indices: svint64_t,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2f64"
+        )]
+        fn _svstnt1_scatter_s64index_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            base: *mut f64,
+            indices: svint64_t,
+        );
+    }
+    _svstnt1_scatter_s64index_f64(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i64"
+        )]
+        fn _svstnt1_scatter_s64index_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            base: *mut i64,
+            indices: svint64_t,
+        );
+    }
+    _svstnt1_scatter_s64index_s64(data, pg.into(), base, indices)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64index_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    indices: svuint64_t,
+    data: svfloat64_t,
+) {
+    svstnt1_scatter_s64index_f64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
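+// An illustrative sketch, not part of the generated bindings: in the `index`
+// forms each per-lane index is scaled by the element size, so lane i of
+// `data` is stored to `base.add(indices[i])` for every active lane of `pg`.
+// The helper name is purely illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn example_scatter_index_f64(
+    pg: svbool_t,
+    dst: *mut f64,
+    indices: svint64_t,
+    vals: svfloat64_t,
+) {
+    // The caller must guarantee dst.add(indices[i]) is valid for all active lanes.
+    svstnt1_scatter_s64index_f64(pg, dst, indices, vals)
+}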
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64offset_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    offsets: svint64_t,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2f64"
+        )]
+        fn _svstnt1_scatter_s64offset_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            base: *mut f64,
+            offsets: svint64_t,
+        );
+    }
+    _svstnt1_scatter_s64offset_f64(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i64"
+        )]
+        fn _svstnt1_scatter_s64offset_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            base: *mut i64,
+            offsets: svint64_t,
+        );
+    }
+    _svstnt1_scatter_s64offset_s64(data, pg.into(), base, offsets)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32offset_f32(
+    pg: svbool_t,
+    base: *mut f32,
+    offsets: svuint32_t,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4f32"
+        )]
+        fn _svstnt1_scatter_u32offset_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            base: *mut f32,
+            offsets: svint32_t,
+        );
+    }
+    _svstnt1_scatter_u32offset_f32(data, pg.into(), base, offsets.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i32"
+        )]
+        fn _svstnt1_scatter_u32offset_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            base: *mut i32,
+            offsets: svint32_t,
+        );
+    }
+    _svstnt1_scatter_u32offset_s32(data, pg.into(), base, offsets.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svstnt1_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64offset_f64(
+    pg: svbool_t,
+    base: *mut f64,
+    offsets: svuint64_t,
+    data: svfloat64_t,
+) {
+    svstnt1_scatter_s64offset_f64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i64,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u64,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
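+// An illustrative sketch, not part of the generated bindings: the `offset`
+// forms differ from the `index` forms only in scaling. Offsets are raw byte
+// offsets, so storing to consecutive u64 elements requires offsets that are
+// multiples of 8 rather than indices 0, 1, 2, ... The helper name is purely
+// illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn example_scatter_offset_u64(
+    pg: svbool_t,
+    dst: *mut u64,
+    byte_offsets: svuint64_t,
+    vals: svuint64_t,
+) {
+    // Each active lane i stores vals[i] to (dst as *mut u8).add(byte_offsets[i]).
+    svstnt1_scatter_u64offset_u64(pg, dst, byte_offsets, vals)
+}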
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: svfloat32_t) {
+    svstnt1_scatter_u32base_offset_f32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svstnt1_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svstnt1_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: svfloat64_t) {
+    svstnt1_scatter_u64base_offset_f64(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svstnt1_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svstnt1_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
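+// An illustrative sketch, not part of the generated bindings: the `base`
+// forms take the destination addresses themselves in a vector, so there is no
+// pointer argument at all; as the safety notes above say, each lane behaves
+// like an integer-to-pointer cast before the store. The helper name is purely
+// illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn example_scatter_vector_of_addresses(
+    pg: svbool_t,
+    addresses: svuint64_t,
+    vals: svuint64_t,
+) {
+    // Every active lane of `addresses` must hold an address that is valid for
+    // a u64 store.
+    svstnt1_scatter_u64base_u64(pg, addresses, vals)
+}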
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_index_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svfloat32_t,
+) {
+    svstnt1_scatter_u32base_offset_f32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svint32_t,
+) {
+    svstnt1_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svuint32_t,
+) {
+    svstnt1_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_index_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svfloat64_t,
+) {
+    svstnt1_scatter_u64base_offset_f64(pg, bases, index.unchecked_shl(3), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svstnt1_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(3), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(3), data)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_offset_f32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svfloat32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4f32.nxv4i32"
+        )]
+        fn _svstnt1_scatter_u32base_offset_f32(
+            data: svfloat32_t,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svstnt1_scatter_u32base_offset_f32(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i32.nxv4i32"
+        )]
+        fn _svstnt1_scatter_u32base_offset_s32(
+            data: svint32_t,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svstnt1_scatter_u32base_offset_s32(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svstnt1_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_offset_f64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svfloat64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2f64.nxv2i64"
+        )]
+        fn _svstnt1_scatter_u64base_offset_f64(
+            data: svfloat64_t,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svstnt1_scatter_u64base_offset_f64(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i64.nxv2i64"
+        )]
+        fn _svstnt1_scatter_u64base_offset_s64(
+            data: svint64_t,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svstnt1_scatter_u64base_offset_s64(data, pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Non-truncating store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1d))]
+pub unsafe fn svstnt1_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svstnt1_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
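+// An illustrative sketch, not part of the generated bindings: the `_index`
+// wrappers above are built on these `_offset` forms by shifting the scalar
+// left by log2(element size) with `unchecked_shl`, i.e. an index is just a
+// pre-scaled byte offset. The helper name is purely illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn example_scatter_base_plus_offset(
+    pg: svbool_t,
+    addresses: svuint64_t,
+    vals: svuint64_t,
+) {
+    // Stores each active lane two u64 elements (16 bytes) past its base address.
+    svstnt1_scatter_u64base_offset_u64(pg, addresses, 16, vals)
+}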
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i8"
+        )]
+        fn _svstnt1b_scatter_s64offset_s64(
+            data: nxv2i8,
+            pg: svbool2_t,
+            base: *mut i8,
+            offsets: svint64_t,
+        );
+    }
+    _svstnt1b_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i16"
+        )]
+        fn _svstnt1h_scatter_s64offset_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            base: *mut i16,
+            offsets: svint64_t,
+        );
+    }
+    _svstnt1h_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_s64offset_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.nxv2i32"
+        )]
+        fn _svstnt1w_scatter_s64offset_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            base: *mut i32,
+            offsets: svint64_t,
+        );
+    }
+    _svstnt1w_scatter_s64offset_s64(simd_cast(data), pg.into(), base, offsets)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_s64offset_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets, data.as_signed())
+}
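+// An illustrative sketch, not part of the generated bindings: the stnt1b,
+// stnt1h and stnt1w variants narrow each lane with `simd_cast` before the
+// store, so only the low 8, 16 or 32 bits of every active element reach
+// memory. The helper name is purely illustrative.
+#[allow(dead_code)]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn example_truncating_scatter(
+    pg: svbool_t,
+    dst: *mut u8,
+    byte_offsets: svint64_t,
+    vals: svuint64_t,
+) {
+    // Stores the low byte of each active u64 lane to dst.add(byte_offsets[i]).
+    svstnt1b_scatter_s64offset_u64(pg, dst, byte_offsets, vals)
+}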
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i8"
+        )]
+        fn _svstnt1b_scatter_u32offset_s32(
+            data: nxv4i8,
+            pg: svbool4_t,
+            base: *mut i8,
+            offsets: svint32_t,
+        );
+    }
+    _svstnt1b_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32offset_s32(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svuint32_t,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.uxtw.nxv4i16"
+        )]
+        fn _svstnt1h_scatter_u32offset_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            base: *mut i16,
+            offsets: svint32_t,
+        );
+    }
+    _svstnt1h_scatter_u32offset_s32(simd_cast(data), pg.into(), base, offsets.as_signed())
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svstnt1b_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u32]offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32offset_u32(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svuint32_t,
+    data: svuint32_t,
+) {
+    svstnt1h_scatter_u32offset_s32(pg, base.as_signed(), offsets, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i8,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1b_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1h_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64offset_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    offsets: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1w_scatter_s64offset_s64(pg, base, offsets.as_signed(), data)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u8,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1b_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64offset_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    offsets: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_s64offset_s64(pg, base.as_signed(), offsets.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i8.nxv4i32"
+        )]
+        fn _svstnt1b_scatter_u32base_offset_s32(
+            data: nxv4i8,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svstnt1b_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_offset_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svint32_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv4i16.nxv4i32"
+        )]
+        fn _svstnt1h_scatter_u32base_offset_s32(
+            data: nxv4i16,
+            pg: svbool4_t,
+            bases: svint32_t,
+            offset: i64,
+        );
+    }
+    _svstnt1h_scatter_u32base_offset_s32(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svstnt1b_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_offset[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_offset_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    offset: i64,
+    data: svuint32_t,
+) {
+    svstnt1h_scatter_u32base_offset_s32(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i8.nxv2i64"
+        )]
+        fn _svstnt1b_scatter_u64base_offset_s64(
+            data: nxv2i8,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svstnt1b_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i16.nxv2i64"
+        )]
+        fn _svstnt1h_scatter_u64base_offset_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svstnt1h_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_offset_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.scalar.offset.nxv2i32.nxv2i64"
+        )]
+        fn _svstnt1w_scatter_u64base_offset_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            bases: svint64_t,
+            offset: i64,
+        );
+    }
+    _svstnt1w_scatter_u64base_offset_s64(simd_cast(data), pg.into(), bases.as_signed(), offset)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svstnt1b_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_offset[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_offset_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    offset: i64,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_u64base_offset_s64(pg, bases, offset, data.as_signed())
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svstnt1b_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: svint32_t) {
+    svstnt1h_scatter_u32base_offset_s32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svstnt1b_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: svuint32_t) {
+    svstnt1h_scatter_u32base_offset_u32(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svstnt1h_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: svint64_t) {
+    svstnt1w_scatter_u64base_offset_s64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 8 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1b_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1b))]
+pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svstnt1b_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svstnt1h_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: svuint64_t) {
+    svstnt1w_scatter_u64base_offset_u64(pg, bases, 0, data)
+}
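+// A sketch of the `[u64base]` forms, where each active lane of `bases` carries a
+// full 64-bit address (with no provenance, as noted in the safety docs) rather
+// than an offset from a shared pointer. `svptrue_b64`, `svindex_u64` and
+// `svdup_n_s64` are assumed to exist elsewhere in this crate, `sve,sve2` is
+// assumed enabled, and the buffer covers a vector length of up to 2048 bits.
+//
+//     unsafe {
+//         let mut buf = [0i8; 32];
+//         let pg = svptrue_b64();
+//         // One address per 64-bit lane: &buf[0], &buf[1], ...
+//         let bases = svindex_u64(buf.as_mut_ptr() as u64, 1);
+//         let data = svdup_n_s64(0x7f);
+//         // Same as svstnt1b_scatter_u64base_offset_s64(pg, bases, 0, data).
+//         svstnt1b_scatter_u64base_s64(pg, bases, data);
+//     }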
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i16"
+        )]
+        fn _svstnt1h_scatter_s64index_s64(
+            data: nxv2i16,
+            pg: svbool2_t,
+            base: *mut i16,
+            indices: svint64_t,
+        );
+    }
+    _svstnt1h_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_s64index_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svint64_t,
+    data: svint64_t,
+) {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.stnt1.scatter.index.nxv2i32"
+        )]
+        fn _svstnt1w_scatter_s64index_s64(
+            data: nxv2i32,
+            pg: svbool2_t,
+            base: *mut i32,
+            indices: svint64_t,
+        );
+    }
+    _svstnt1w_scatter_s64index_s64(simd_cast(data), pg.into(), base, indices)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[s64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_s64index_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svint64_t,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices, data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i16,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1h_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64index_s64(
+    pg: svbool_t,
+    base: *mut i32,
+    indices: svuint64_t,
+    data: svint64_t,
+) {
+    svstnt1w_scatter_s64index_s64(pg, base, indices.as_signed(), data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u16,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter_[u64]index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64index_u64(
+    pg: svbool_t,
+    base: *mut u32,
+    indices: svuint64_t,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_s64index_s64(pg, base.as_signed(), indices.as_signed(), data.as_signed())
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_index_s32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svint32_t,
+) {
+    svstnt1h_scatter_u32base_offset_s32(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u32base]_index[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u32base_index_u32(
+    pg: svbool_t,
+    bases: svuint32_t,
+    index: i64,
+    data: svuint32_t,
+) {
+    svstnt1h_scatter_u32base_offset_u32(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svstnt1h_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_index_s64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svint64_t,
+) {
+    svstnt1w_scatter_u64base_offset_s64(pg, bases, index.unchecked_shl(2), data)
+}
+#[doc = "Truncate to 16 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1h_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1h))]
+pub unsafe fn svstnt1h_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svstnt1h_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(1), data)
+}
+#[doc = "Truncate to 32 bits and store, non-temporal"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svstnt1w_scatter[_u64base]_index[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."]
+#[doc = "  * This dereferences and accesses the calculated address for each active element (governed by `pg`)."]
+#[doc = "  * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::from_exposed_addr`]) on each lane before using it."]
+#[doc = "  * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(stnt1w))]
+pub unsafe fn svstnt1w_scatter_u64base_index_u64(
+    pg: svbool_t,
+    bases: svuint64_t,
+    index: i64,
+    data: svuint64_t,
+) {
+    svstnt1w_scatter_u64base_offset_u64(pg, bases, index.unchecked_shl(2), data)
+}
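+// The `index` forms take element indices rather than byte offsets: the
+// pointer-based variants pass the indices straight through to the LLVM `index`
+// intrinsics, while the `[u32base]`/`[u64base]` variants forward to the offset
+// forms with the scalar index shifted left by log2 of the element size, as in
+// the bodies above. A sketch storing the low 16 bits of each lane to every
+// other `i16` slot (`svptrue_b64`, `svindex_s64` and `svdup_n_s64` assumed
+// available, `sve,sve2` enabled, vector length at most 2048 bits):
+//
+//     unsafe {
+//         let mut buf = [0i16; 64];
+//         let pg = svptrue_b64();
+//         let indices = svindex_s64(0, 2);     // element indices 0, 2, 4, ...
+//         let data = svdup_n_s64(0x1234);
+//         // Writes 0x1234 to buf[0], buf[2], buf[4], ... for each active lane.
+//         svstnt1h_scatter_s64index_s64(pg, buf.as_mut_ptr(), indices, data);
+//     }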
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv8i16")]
+        fn _svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svsubhnb_s16(op1, op2) }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t {
+    svsubhnb_s16(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv4i32")]
+        fn _svsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svsubhnb_s32(op1, op2) }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t {
+    svsubhnb_s32(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnb.nxv2i64")]
+        fn _svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svsubhnb_s64(op1, op2) }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t {
+    svsubhnb_s64(op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svsubhnb_s16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t {
+    svsubhnb_u16(op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svsubhnb_s32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t {
+    svsubhnb_u32(op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svsubhnb_s64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnb))]
+pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t {
+    svsubhnb_u64(op1, svdup_n_u64(op2))
+}
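+// `svsubhnb_*` subtracts at the wide element size, keeps the most significant
+// half of each difference and writes it to the even-numbered narrow lanes,
+// zeroing the odd lanes; the `svsubhnt_*` functions below then fill the odd
+// lanes while preserving an existing even-lane vector. A sketch with assumed
+// values, in a context with `sve,sve2` enabled (`svdup_n_s16` is defined
+// elsewhere in this module):
+//
+//     let a = svdup_n_s16(0x1234);
+//     let b = svdup_n_s16(0x0034);
+//     // Each 16-bit difference is 0x1200, so its high half 0x12 lands in the
+//     // even byte lanes of `bottom`; a later svsubhnt_s16(bottom, c, d) fills
+//     // the odd byte lanes from a second pair of inputs.
+//     let bottom = svsubhnb_s16(a, b);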
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv8i16")]
+        fn _svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t;
+    }
+    unsafe { _svsubhnt_s16(even, op1, op2) }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t {
+    svsubhnt_s16(even, op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv4i32")]
+        fn _svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_t;
+    }
+    unsafe { _svsubhnt_s32(even, op1, op2) }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t {
+    svsubhnt_s32(even, op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.subhnt.nxv2i64")]
+        fn _svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_t;
+    }
+    unsafe { _svsubhnt_s64(even, op1, op2) }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t {
+    svsubhnt_s64(even, op1, svdup_n_s64(op2))
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint8_t {
+    unsafe { svsubhnt_s16(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t {
+    svsubhnt_u16(even, op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuint16_t {
+    unsafe { svsubhnt_s32(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t {
+    svsubhnt_u32(even, op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuint32_t {
+    unsafe { svsubhnt_s64(even.as_signed(), op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract narrow high part (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(subhnt))]
+pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t {
+    svsubhnt_u64(even, op1, svdup_n_u64(op2))
+}
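+// Illustrative sketch (not part of the generated bindings): `svsubhnt` writes the
+// narrowed high halves of `op1 - op2` to the odd ("top") lanes while preserving the
+// even lanes of `even`, so pairing it with `svsubhnb` fills a complete narrow
+// vector (assuming an SVE2 target):
+//
+//     let bottom = svsubhnb_u16(x, y);       // even lanes hold narrowed results
+//     let full = svsubhnt_u16(bottom, x, y); // odd lanes filled in as well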
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv8i16")]
+        fn _svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsublb_s16(op1, op2) }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svsublb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv4i32")]
+        fn _svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsublb_s32(op1, op2) }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svsublb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublb.nxv2i64")]
+        fn _svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsublb_s64(op1, op2) }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublb))]
+pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svsublb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv8i16")]
+        fn _svsublb_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsublb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svsublb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv4i32")]
+        fn _svsublb_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsublb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svsublb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublb.nxv2i64")]
+        fn _svsublb_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsublb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublb))]
+pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svsublb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssublbt.nxv8i16"
+        )]
+        fn _svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsublbt_s16(op1, op2) }
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svsublbt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssublbt.nxv4i32"
+        )]
+        fn _svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsublbt_s32(op1, op2) }
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svsublbt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssublbt.nxv2i64"
+        )]
+        fn _svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsublbt_s64(op1, op2) }
+}
+#[doc = "Subtract long (bottom - top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublbt))]
+pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svsublbt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv8i16")]
+        fn _svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsublt_s16(op1, op2) }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svsublt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv4i32")]
+        fn _svsublt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsublt_s32(op1, op2) }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svsublt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssublt.nxv2i64")]
+        fn _svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsublt_s64(op1, op2) }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssublt))]
+pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svsublt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv8i16")]
+        fn _svsublt_u16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsublt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t {
+    svsublt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv4i32")]
+        fn _svsublt_u32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsublt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t {
+    svsublt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usublt.nxv2i64")]
+        fn _svsublt_u64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsublt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract long (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usublt))]
+pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t {
+    svsublt_u64(op1, svdup_n_u32(op2))
+}
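+// Illustrative sketch (not part of the generated bindings): the widening subtracts
+// each read only half the input lanes. `svsublb` operates on the even ("bottom")
+// lanes and `svsublt` on the odd ("top") lanes, both widening to double-width
+// elements, so together they cover every lane (assuming an SVE2 target):
+//
+//     let lo = svsublb_s16(a, b); // a[0]-b[0], a[2]-b[2], ... as i16
+//     let hi = svsublt_s16(a, b); // a[1]-b[1], a[3]-b[3], ... as i16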
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssubltb.nxv8i16"
+        )]
+        fn _svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsubltb_s16(op1, op2) }
+}
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t {
+    svsubltb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssubltb.nxv4i32"
+        )]
+        fn _svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsubltb_s32(op1, op2) }
+}
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t {
+    svsubltb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.ssubltb.nxv2i64"
+        )]
+        fn _svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsubltb_s64(op1, op2) }
+}
+#[doc = "Subtract long (top - bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubltb))]
+pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t {
+    svsubltb_s64(op1, svdup_n_s32(op2))
+}
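+// Illustrative sketch (not part of the generated bindings): the mixed forms cross
+// the halves. `svsublbt` subtracts top lanes of `op2` from bottom lanes of `op1`,
+// and `svsubltb` subtracts bottom lanes of `op2` from top lanes of `op1`, both
+// widening the result (assuming an SVE2 target):
+//
+//     let bt = svsublbt_s16(a, b); // a[0]-b[1], a[2]-b[3], ... as i16
+//     let tb = svsubltb_s16(a, b); // a[1]-b[0], a[3]-b[2], ... as i16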
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv8i16")]
+        fn _svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsubwb_s16(op1, op2) }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
+    svsubwb_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv4i32")]
+        fn _svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsubwb_s32(op1, op2) }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
+    svsubwb_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwb.nxv2i64")]
+        fn _svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsubwb_s64(op1, op2) }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwb))]
+pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
+    svsubwb_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv8i16")]
+        fn _svsubwb_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsubwb_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
+    svsubwb_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv4i32")]
+        fn _svsubwb_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsubwb_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
+    svsubwb_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwb.nxv2i64")]
+        fn _svsubwb_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsubwb_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (bottom)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwb))]
+pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
+    svsubwb_u64(op1, svdup_n_u32(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv8i16")]
+        fn _svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsubwt_s16(op1, op2) }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t {
+    svsubwt_s16(op1, svdup_n_s8(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv4i32")]
+        fn _svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsubwt_s32(op1, op2) }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t {
+    svsubwt_s32(op1, svdup_n_s16(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.ssubwt.nxv2i64")]
+        fn _svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsubwt_s64(op1, op2) }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(ssubwt))]
+pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t {
+    svsubwt_s64(op1, svdup_n_s32(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv8i16")]
+        fn _svsubwt_u16(op1: svint16_t, op2: svint8_t) -> svint16_t;
+    }
+    unsafe { _svsubwt_u16(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t {
+    svsubwt_u16(op1, svdup_n_u8(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv4i32")]
+        fn _svsubwt_u32(op1: svint32_t, op2: svint16_t) -> svint32_t;
+    }
+    unsafe { _svsubwt_u32(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t {
+    svsubwt_u32(op1, svdup_n_u16(op2))
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.usubwt.nxv2i64")]
+        fn _svsubwt_u64(op1: svint64_t, op2: svint32_t) -> svint64_t;
+    }
+    unsafe { _svsubwt_u64(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Subtract wide (top)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(usubwt))]
+pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t {
+    svsubwt_u64(op1, svdup_n_u32(op2))
+}
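+// Illustrative sketch (not part of the generated bindings): the "wide" forms keep
+// `op1` at full element width and only widen `op2`: `svsubwb` uses its even
+// ("bottom") lanes and `svsubwt` its odd ("top") lanes (assuming an SVE2 target):
+//
+//     let lo = svsubwb_s16(acc, narrow); // acc[i] - widen(narrow[2*i])
+//     let hi = svsubwt_s16(acc, narrow); // acc[i] - widen(narrow[2*i + 1])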
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4f32")]
+        fn _svtbl2_f32(data0: svfloat32_t, data1: svfloat32_t, indices: svint32_t) -> svfloat32_t;
+    }
+    unsafe {
+        _svtbl2_f32(
+            svget2_f32::<0>(data),
+            svget2_f32::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2f64")]
+        fn _svtbl2_f64(data0: svfloat64_t, data1: svfloat64_t, indices: svint64_t) -> svfloat64_t;
+    }
+    unsafe {
+        _svtbl2_f64(
+            svget2_f64::<0>(data),
+            svget2_f64::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv16i8")]
+        fn _svtbl2_s8(data0: svint8_t, data1: svint8_t, indices: svint8_t) -> svint8_t;
+    }
+    unsafe {
+        _svtbl2_s8(
+            svget2_s8::<0>(data),
+            svget2_s8::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv8i16")]
+        fn _svtbl2_s16(data0: svint16_t, data1: svint16_t, indices: svint16_t) -> svint16_t;
+    }
+    unsafe {
+        _svtbl2_s16(
+            svget2_s16::<0>(data),
+            svget2_s16::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv4i32")]
+        fn _svtbl2_s32(data0: svint32_t, data1: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe {
+        _svtbl2_s32(
+            svget2_s32::<0>(data),
+            svget2_s32::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbl2.nxv2i64")]
+        fn _svtbl2_s64(data0: svint64_t, data1: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe {
+        _svtbl2_s64(
+            svget2_s64::<0>(data),
+            svget2_s64::<1>(data),
+            indices.as_signed(),
+        )
+    }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t {
+    unsafe { svtbl2_s8(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t {
+    unsafe { svtbl2_s16(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svtbl2_s32(data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in two-vector table"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbl))]
+pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svtbl2_s64(data.as_signed(), indices).as_unsigned() }
+}
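+// Illustrative sketch (not part of the generated bindings): `svtbl2` indexes into
+// the concatenation of the two table vectors, and out-of-range indices produce
+// zero. Assuming an SVE2 target and that `svcreate2_f32` is in scope to build the
+// tuple:
+//
+//     let table = svcreate2_f32(first_half, second_half);
+//     let gathered = svtbl2_f32(table, indices); // indices may span both vectors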
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) -> svfloat32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4f32")]
+        fn _svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svint32_t) -> svfloat32_t;
+    }
+    unsafe { _svtbx_f32(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) -> svfloat64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2f64")]
+        fn _svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svint64_t) -> svfloat64_t;
+    }
+    unsafe { _svtbx_f64(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv16i8")]
+        fn _svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svint8_t) -> svint8_t;
+    }
+    unsafe { _svtbx_s8(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv8i16")]
+        fn _svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svint16_t) -> svint16_t;
+    }
+    unsafe { _svtbx_s16(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv4i32")]
+        fn _svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svint32_t) -> svint32_t;
+    }
+    unsafe { _svtbx_s32(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.tbx.nxv2i64")]
+        fn _svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svint64_t) -> svint64_t;
+    }
+    unsafe { _svtbx_s64(fallback, data, indices.as_signed()) }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svuint8_t {
+    unsafe { svtbx_s8(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> svuint16_t {
+    unsafe { svtbx_s16(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> svuint32_t {
+    unsafe { svtbx_s32(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
+#[doc = "Table lookup in single-vector table (merging)"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(tbx))]
+pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> svuint64_t {
+    unsafe { svtbx_s64(fallback.as_signed(), data.as_signed(), indices).as_unsigned() }
+}
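+// Illustrative sketch (not part of the generated bindings): `svtbx` is the merging
+// lookup: in-range indices select from `data`, while out-of-range indices leave the
+// corresponding lane of `fallback` unchanged (assuming an SVE2 target):
+//
+//     let merged = svtbx_u8(fallback, data, indices);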
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(punpkhi))]
+pub fn svunpkhi_b(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.punpkhi.nxv16i1"
+        )]
+        fn _svunpkhi_b(op: svbool_t) -> svbool8_t;
+    }
+    unsafe { _svunpkhi_b(op).into() }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s16(op: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv8i16"
+        )]
+        fn _svunpkhi_s16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpkhi_s16(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s32(op: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv4i32"
+        )]
+        fn _svunpkhi_s32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpkhi_s32(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpkhi))]
+pub fn svunpkhi_s64(op: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpkhi.nxv2i64"
+        )]
+        fn _svunpkhi_s64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpkhi_s64(op) }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv8i16"
+        )]
+        fn _svunpkhi_u16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpkhi_u16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv4i32"
+        )]
+        fn _svunpkhi_u32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpkhi_u32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend high half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpkhi))]
+pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpkhi.nxv2i64"
+        )]
+        fn _svunpkhi_u64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpkhi_u64(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(punpklo))]
+pub fn svunpklo_b(op: svbool_t) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.punpklo.nxv16i1"
+        )]
+        fn _svunpklo_b(op: svbool_t) -> svbool8_t;
+    }
+    unsafe { _svunpklo_b(op).into() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s16(op: svint8_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv8i16"
+        )]
+        fn _svunpklo_s16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpklo_s16(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s32(op: svint16_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv4i32"
+        )]
+        fn _svunpklo_s32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpklo_s32(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(sunpklo))]
+pub fn svunpklo_s64(op: svint32_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.sunpklo.nxv2i64"
+        )]
+        fn _svunpklo_s64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpklo_s64(op) }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv8i16"
+        )]
+        fn _svunpklo_u16(op: svint8_t) -> svint16_t;
+    }
+    unsafe { _svunpklo_u16(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv4i32"
+        )]
+        fn _svunpklo_u32(op: svint16_t) -> svint32_t;
+    }
+    unsafe { _svunpklo_u32(op.as_signed()).as_unsigned() }
+}
+#[doc = "Unpack and extend low half"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(uunpklo))]
+pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.uunpklo.nxv2i64"
+        )]
+        fn _svunpklo_u64(op: svint32_t) -> svint64_t;
+    }
+    unsafe { _svunpklo_u64(op.as_signed()).as_unsigned() }
+}
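+// Illustrative sketch (not part of the generated bindings): `svunpklo` and
+// `svunpkhi` widen the low and high halves of a vector (sign-extending for signed
+// element types, zero-extending for unsigned), so the pair widens every lane
+// (assuming an SVE2 target):
+//
+//     let lo = svunpklo_s16(bytes); // first half of the i8 lanes as i16
+//     let hi = svunpkhi_s16(bytes); // second half of the i8 lanes as i16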
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv16i8")]
+        fn _svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t;
+    }
+    unsafe { _svuqadd_s8_m(pg, op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_m(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svuqadd_s8_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_x(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t {
+    svuqadd_s8_m(pg, svsel_s8(pg, op1, svdup_n_s8(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t {
+    svuqadd_s8_z(pg, op1, svdup_n_u8(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv8i16")]
+        fn _svuqadd_s16_m(pg: svbool8_t, op1: svint16_t, op2: svint16_t) -> svint16_t;
+    }
+    unsafe { _svuqadd_s16_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_m(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svuqadd_s16_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_x(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t {
+    svuqadd_s16_m(pg, svsel_s16(pg, op1, svdup_n_s16(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t {
+    svuqadd_s16_z(pg, op1, svdup_n_u16(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv4i32")]
+        fn _svuqadd_s32_m(pg: svbool4_t, op1: svint32_t, op2: svint32_t) -> svint32_t;
+    }
+    unsafe { _svuqadd_s32_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_m(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svuqadd_s32_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_x(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t {
+    svuqadd_s32_m(pg, svsel_s32(pg, op1, svdup_n_s32(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t {
+    svuqadd_s32_z(pg, op1, svdup_n_u32(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.suqadd.nxv2i64")]
+        fn _svuqadd_s64_m(pg: svbool2_t, op1: svint64_t, op2: svint64_t) -> svint64_t;
+    }
+    unsafe { _svuqadd_s64_m(pg.into(), op1, op2.as_signed()) }
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_m(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svuqadd_s64_m(pg, op1, op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_x(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t {
+    svuqadd_s64_m(pg, svsel_s64(pg, op1, svdup_n_s64(0)), op2)
+}
+#[doc = "Saturating add with unsigned addend"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(suqadd))]
+pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t {
+    svuqadd_s64_z(pg, op1, svdup_n_u64(op2))
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i32"
+        )]
+        fn _svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_s32(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i32"
+        )]
+        fn _svwhilege_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i32"
+        )]
+        fn _svwhilege_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i32"
+        )]
+        fn _svwhilege_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv16i1.i64"
+        )]
+        fn _svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_s64(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv8i1.i64"
+        )]
+        fn _svwhilege_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv4i1.i64"
+        )]
+        fn _svwhilege_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilege))]
+pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilege.nxv2i1.i64"
+        )]
+        fn _svwhilege_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i32"
+        )]
+        fn _svwhilege_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i32"
+        )]
+        fn _svwhilege_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i32"
+        )]
+        fn _svwhilege_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i32"
+        )]
+        fn _svwhilege_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv16i1.i64"
+        )]
+        fn _svwhilege_b8_u64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilege_b8_u64(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv8i1.i64"
+        )]
+        fn _svwhilege_b16_u64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilege_b16_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv4i1.i64"
+        )]
+        fn _svwhilege_b32_u64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilege_b32_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than or equal to"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehs))]
+pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehs.nxv2i1.i64"
+        )]
+        fn _svwhilege_b64_u64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilege_b64_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i32"
+        )]
+        fn _svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_s32(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i32"
+        )]
+        fn _svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i32"
+        )]
+        fn _svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i32"
+        )]
+        fn _svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_s32(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv16i1.i64"
+        )]
+        fn _svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_s64(op1, op2) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv8i1.i64"
+        )]
+        fn _svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv4i1.i64"
+        )]
+        fn _svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilegt))]
+pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilegt.nxv2i1.i64"
+        )]
+        fn _svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_s64(op1, op2).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i32"
+        )]
+        fn _svwhilegt_b8_u32(op1: i32, op2: i32) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_u32(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i32"
+        )]
+        fn _svwhilegt_b16_u32(op1: i32, op2: i32) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i32"
+        )]
+        fn _svwhilegt_b32_u32(op1: i32, op2: i32) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i32"
+        )]
+        fn _svwhilegt_b64_u32(op1: i32, op2: i32) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_u32(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv16i1.i64"
+        )]
+        fn _svwhilegt_b8_u64(op1: i64, op2: i64) -> svbool_t;
+    }
+    unsafe { _svwhilegt_b8_u64(op1.as_signed(), op2.as_signed()) }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv8i1.i64"
+        )]
+        fn _svwhilegt_b16_u64(op1: i64, op2: i64) -> svbool8_t;
+    }
+    unsafe { _svwhilegt_b16_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv4i1.i64"
+        )]
+        fn _svwhilegt_b32_u64(op1: i64, op2: i64) -> svbool4_t;
+    }
+    unsafe { _svwhilegt_b32_u64(op1.as_signed(), op2.as_signed()).into() }
+}
+#[doc = "While decrementing scalar is greater than"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilehi))]
+pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t {
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilehi.nxv2i1.i64"
+        )]
+        fn _svwhilegt_b64_u64(op1: i64, op2: i64) -> svbool2_t;
+    }
+    unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).into() }
+}
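+// Private helpers that bind the LLVM `whilerw` intrinsic for each element width;
+// the element-typed public `svwhilerw_*` wrappers below delegate to these after
+// erasing the pointee type to `c_void`.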
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilerw_8ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilerw.b.nxv16i1.p0"
+        )]
+        fn _svwhilerw_8ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool_t;
+    }
+    _svwhilerw_8ptr(op1, op2)
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilerw_16ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilerw.h.nxv8i1.p0"
+        )]
+        fn _svwhilerw_16ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool8_t;
+    }
+    _svwhilerw_16ptr(op1, op2).into()
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilerw_32ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilerw.s.nxv4i1.p0"
+        )]
+        fn _svwhilerw_32ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool4_t;
+    }
+    _svwhilerw_32ptr(op1, op2).into()
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilerw_64ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilerw.d.nxv2i1.p0"
+        )]
+        fn _svwhilerw_64ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool2_t;
+    }
+    _svwhilerw_64ptr(op1, op2).into()
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t {
+    svwhilerw_32ptr::<f32>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t {
+    svwhilerw_64ptr::<f64>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t {
+    svwhilerw_8ptr::<i8>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t {
+    svwhilerw_16ptr::<i16>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t {
+    svwhilerw_32ptr::<i32>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t {
+    svwhilerw_64ptr::<i64>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t {
+    svwhilerw_8ptr::<u8>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t {
+    svwhilerw_16ptr::<u16>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t {
+    svwhilerw_32ptr::<u32>(op1, op2)
+}
+#[doc = "While free of read-after-write conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilerw))]
+pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t {
+    svwhilerw_64ptr::<u64>(op1, op2)
+}
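+// Equivalent private helpers for the LLVM `whilewr` (write-after-read) intrinsics;
+// the public `svwhilewr_*` wrappers below delegate to these.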
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilewr_8ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilewr.b.nxv16i1.p0"
+        )]
+        fn _svwhilewr_8ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool_t;
+    }
+    _svwhilewr_8ptr(op1, op2)
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilewr_16ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilewr.h.nxv8i1.p0"
+        )]
+        fn _svwhilewr_16ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool8_t;
+    }
+    _svwhilewr_16ptr(op1, op2).into()
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilewr_32ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilewr.s.nxv4i1.p0"
+        )]
+        fn _svwhilewr_32ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool4_t;
+    }
+    _svwhilewr_32ptr(op1, op2).into()
+}
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+unsafe fn svwhilewr_64ptr<T>(op1: *const T, op2: *const T) -> svbool_t {
+    let op1 = op1 as *const crate::ffi::c_void;
+    let op2 = op2 as *const crate::ffi::c_void;
+    extern "C" {
+        #[cfg_attr(
+            target_arch = "aarch64",
+            link_name = "llvm.aarch64.sve.whilewr.d.nxv2i1.p0"
+        )]
+        fn _svwhilewr_64ptr(
+            op1: *const crate::ffi::c_void,
+            op2: *const crate::ffi::c_void,
+        ) -> svbool2_t;
+    }
+    _svwhilewr_64ptr(op1, op2).into()
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t {
+    svwhilewr_32ptr::<f32>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t {
+    svwhilewr_64ptr::<f64>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t {
+    svwhilewr_8ptr::<i8>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t {
+    svwhilewr_16ptr::<i16>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t {
+    svwhilewr_32ptr::<i32>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t {
+    svwhilewr_64ptr::<i64>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t {
+    svwhilewr_8ptr::<u8>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t {
+    svwhilewr_16ptr::<u16>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t {
+    svwhilewr_32ptr::<u32>(op1, op2)
+}
+#[doc = "While free of write-after-read conflicts"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"]
+#[doc = ""]
+#[doc = "## Safety"]
+#[doc = "  * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(whilewr))]
+pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t {
+    svwhilewr_64ptr::<u64>(op1, op2)
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_s8<const IMM3: i32>(op1: svint8_t, op2: svint8_t) -> svint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv16i8")]
+        fn _svxar_n_s8(op1: svint8_t, op2: svint8_t, imm3: i32) -> svint8_t;
+    }
+    unsafe { _svxar_n_s8(op1, op2, IMM3) }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_s16<const IMM3: i32>(op1: svint16_t, op2: svint16_t) -> svint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv8i16")]
+        fn _svxar_n_s16(op1: svint16_t, op2: svint16_t, imm3: i32) -> svint16_t;
+    }
+    unsafe { _svxar_n_s16(op1, op2, IMM3) }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_s32<const IMM3: i32>(op1: svint32_t, op2: svint32_t) -> svint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv4i32")]
+        fn _svxar_n_s32(op1: svint32_t, op2: svint32_t, imm3: i32) -> svint32_t;
+    }
+    unsafe { _svxar_n_s32(op1, op2, IMM3) }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_s64<const IMM3: i32>(op1: svint64_t, op2: svint64_t) -> svint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    extern "C" {
+        #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.xar.nxv2i64")]
+        fn _svxar_n_s64(op1: svint64_t, op2: svint64_t, imm3: i32) -> svint64_t;
+    }
+    unsafe { _svxar_n_s64(op1, op2, IMM3) }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_u8<const IMM3: i32>(op1: svuint8_t, op2: svuint8_t) -> svuint8_t {
+    static_assert_range!(IMM3, 1, 8);
+    unsafe { svxar_n_s8::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_u16<const IMM3: i32>(op1: svuint16_t, op2: svuint16_t) -> svuint16_t {
+    static_assert_range!(IMM3, 1, 16);
+    unsafe { svxar_n_s16::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_u32<const IMM3: i32>(op1: svuint32_t, op2: svuint32_t) -> svuint32_t {
+    static_assert_range!(IMM3, 1, 32);
+    unsafe { svxar_n_s32::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
+#[doc = "Bitwise exclusive OR and rotate right"]
+#[doc = ""]
+#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"]
+#[inline]
+#[target_feature(enable = "sve,sve2")]
+#[cfg_attr(test, assert_instr(xar, IMM3 = 1))]
+pub fn svxar_n_u64<const IMM3: i32>(op1: svuint64_t, op2: svuint64_t) -> svuint64_t {
+    static_assert_range!(IMM3, 1, 64);
+    unsafe { svxar_n_s64::<IMM3>(op1.as_signed(), op2.as_signed()).as_unsigned() }
+}
diff --git a/crates/core_arch/src/aarch64/sve/types.rs b/crates/core_arch/src/aarch64/sve/types.rs
new file mode 100644
index 0000000000..e851a0d762
--- /dev/null
+++ b/crates/core_arch/src/aarch64/sve/types.rs
@@ -0,0 +1,285 @@
+use super::*;
+#[allow(improper_ctypes)]
+use crate::marker::ConstParamTy;
+
+pub(super) trait AsUnsigned {
+    type Unsigned: ?Sized;
+    unsafe fn as_unsigned(self) -> Self::Unsigned;
+}
+
+pub(super) trait AsSigned {
+    type Signed: ?Sized;
+    unsafe fn as_signed(self) -> Self::Signed;
+}
+
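+// A local `Into` trait is used instead of `core::convert::Into`, most likely
+// because the standard trait requires `Self: Sized` and the scalable vector
+// types defined below are unsized.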
+pub(super) trait Into<T: ?Sized> {
+    fn into(self) -> T;
+}
+
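+// Defines an SVE vector type as an unsized, scalable SIMD struct. `$elt` is the
+// minimum element count, i.e. the `N` in LLVM's `<vscale x N x T>` (16 for 8-bit
+// lanes, 2 for 64-bit lanes); the x2/x3/x4 tuple types use the corresponding
+// multiples.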
+macro_rules! impl_sve_type {
+    ($(($v:vis, $elem_type:ty, $name:ident, $elt:literal))*) => ($(
+        #[repr(simd, scalable($elt))]
+        #[allow(non_camel_case_types)]
+        $v struct $name {
+            _ty: [$elem_type],
+        }
+    )*)
+}
+
+macro_rules! impl_sign_conversions_sv {
+    ($(($signed:ty, $unsigned:ty))*) => ($(
+        impl AsUnsigned for $signed {
+            type Unsigned = $unsigned;
+
+            #[inline]
+            #[target_feature(enable = "sve")]
+            unsafe fn as_unsigned(self) -> $unsigned {
+                simd_reinterpret(self)
+            }
+        }
+
+        impl AsSigned for $unsigned {
+            type Signed = $signed;
+
+            #[inline]
+            #[target_feature(enable = "sve")]
+            unsafe fn as_signed(self) -> $signed {
+                simd_reinterpret(self)
+            }
+        }
+    )*)
+}
+
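+// Scalar and pointer sign conversions use a plain `transmute`, while the unsized
+// scalable vector types (handled by `impl_sign_conversions_sv` above) go through
+// `simd_reinterpret` instead.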
+macro_rules! impl_sign_conversions {
+    ($(($signed:ty, $unsigned:ty))*) => ($(
+        impl AsUnsigned for $signed {
+            type Unsigned = $unsigned;
+
+            #[inline]
+            #[target_feature(enable = "sve")]
+            unsafe fn as_unsigned(self) -> $unsigned {
+                crate::mem::transmute(self)
+            }
+        }
+
+        impl AsSigned for $unsigned {
+            type Signed = $signed;
+
+            #[inline]
+            #[target_feature(enable = "sve")]
+            unsafe fn as_signed(self) -> $signed {
+                crate::mem::transmute(self)
+            }
+        }
+    )*)
+}
+
+/// LLVM requires the predicate lane count to match the lane count of the
+/// vector type it operates on. However, the ACLE defines only one bool type,
+/// and the instruction set doesn't make this distinction. As a result we
+/// create these internal types so that we can match the LLVM signatures.
+/// Each of these internal types can be converted to the public `svbool_t`
+/// type, and `svbool_t` can be converted back into each of them.
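+///
+/// For example, an LLVM intrinsic that operates on 16-bit lanes returns an
+/// `svbool8_t` (8 predicate lanes); the wrapper converts the result back to the
+/// public type with `.into()`, as in `_svwhilege_b16_s32(op1, op2).into()`.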
+macro_rules! impl_internal_sve_predicate {
+    ($(($name:ident, $elt:literal))*) => ($(
+        #[repr(simd, scalable($elt))]
+        #[allow(non_camel_case_types)]
+        pub(super) struct $name {
+            _ty: [bool],
+        }
+
+        impl Into<svbool_t> for $name {
+            #[inline(always)]
+            fn into(self) -> svbool_t {
+                #[allow(improper_ctypes)]
+                extern "C" {
+                    #[cfg_attr(
+                        target_arch = "aarch64",
+                        link_name = concat!("llvm.aarch64.sve.convert.to.svbool.nxv", $elt, "i1")
+                    )]
+                    fn convert_to_svbool(b: $name) -> svbool_t;
+                }
+                unsafe { convert_to_svbool(self) }
+            }
+        }
+
+        impl Into<$name> for svbool_t {
+            #[inline(always)]
+            fn into(self) -> $name {
+                #[allow(improper_ctypes)]
+                extern "C" {
+                    #[cfg_attr(
+                        target_arch = "aarch64",
+                        link_name = concat!("llvm.aarch64.sve.convert.from.svbool.nxv", $elt, "i1")
+                    )]
+                    fn convert_from_svbool(b: svbool_t) -> $name;
+                }
+                unsafe { convert_from_svbool(self) }
+            }
+        }
+    )*)
+}
+
+impl_sve_type! {
+    (pub, bool, svbool_t, 16)
+
+    (pub, i8, svint8_t, 16)
+    (pub, u8, svuint8_t, 16)
+
+    (pub, i16, svint16_t, 8)
+    (pub, u16, svuint16_t, 8)
+    (pub, f32, svfloat32_t, 4)
+    (pub, i32, svint32_t, 4)
+    (pub, u32, svuint32_t, 4)
+    (pub, f64, svfloat64_t, 2)
+    (pub, i64, svint64_t, 2)
+    (pub, u64, svuint64_t, 2)
+
+    (pub, i8, svint8x2_t, 32)
+    (pub, u8, svuint8x2_t, 32)
+    (pub, i16, svint16x2_t, 16)
+    (pub, u16, svuint16x2_t, 16)
+    (pub, f32, svfloat32x2_t, 8)
+    (pub, i32, svint32x2_t, 8)
+    (pub, u32, svuint32x2_t, 8)
+    (pub, f64, svfloat64x2_t, 4)
+    (pub, i64, svint64x2_t, 4)
+    (pub, u64, svuint64x2_t, 4)
+
+    (pub, i8, svint8x3_t, 48)
+    (pub, u8, svuint8x3_t, 48)
+    (pub, i16, svint16x3_t, 24)
+    (pub, u16, svuint16x3_t, 24)
+    (pub, f32, svfloat32x3_t, 12)
+    (pub, i32, svint32x3_t, 12)
+    (pub, u32, svuint32x3_t, 12)
+    (pub, f64, svfloat64x3_t, 6)
+    (pub, i64, svint64x3_t, 6)
+    (pub, u64, svuint64x3_t, 6)
+
+    (pub, i8, svint8x4_t, 64)
+    (pub, u8, svuint8x4_t, 64)
+    (pub, i16, svint16x4_t, 32)
+    (pub, u16, svuint16x4_t, 32)
+    (pub, f32, svfloat32x4_t, 16)
+    (pub, i32, svint32x4_t, 16)
+    (pub, u32, svuint32x4_t, 16)
+    (pub, f64, svfloat64x4_t, 8)
+    (pub, i64, svint64x4_t, 8)
+    (pub, u64, svuint64x4_t, 8)
+
+// Internal types:
+    (pub(super), i8, nxv2i8, 2)
+    (pub(super), i8, nxv4i8, 4)
+    (pub(super), i8, nxv8i8, 8)
+
+    (pub(super), i16, nxv2i16, 2)
+    (pub(super), i16, nxv4i16, 4)
+
+    (pub(super), i32, nxv2i32, 2)
+
+    (pub(super), u8, nxv2u8, 2)
+    (pub(super), u8, nxv4u8, 4)
+    (pub(super), u8, nxv8u8, 8)
+
+    (pub(super), u16, nxv2u16, 2)
+    (pub(super), u16, nxv4u16, 4)
+
+    (pub(super), u32, nxv2u32, 2)
+}
+
+impl_sign_conversions! {
+    (i8, u8)
+    (i16, u16)
+    (i32, u32)
+    (i64, u64)
+    (*const i8, *const u8)
+    (*const i16, *const u16)
+    (*const i32, *const u32)
+    (*const i64, *const u64)
+    (*mut i8, *mut u8)
+    (*mut i16, *mut u16)
+    (*mut i32, *mut u32)
+    (*mut i64, *mut u64)
+}
+
+impl_sign_conversions_sv! {
+    (svint8_t, svuint8_t)
+    (svint16_t, svuint16_t)
+    (svint32_t, svuint32_t)
+    (svint64_t, svuint64_t)
+
+    (svint8x2_t, svuint8x2_t)
+    (svint16x2_t, svuint16x2_t)
+    (svint32x2_t, svuint32x2_t)
+    (svint64x2_t, svuint64x2_t)
+
+    (svint8x3_t, svuint8x3_t)
+    (svint16x3_t, svuint16x3_t)
+    (svint32x3_t, svuint32x3_t)
+    (svint64x3_t, svuint64x3_t)
+
+    (svint8x4_t, svuint8x4_t)
+    (svint16x4_t, svuint16x4_t)
+    (svint32x4_t, svuint32x4_t)
+    (svint64x4_t, svuint64x4_t)
+
+    // Internal types:
+    (nxv2i8, nxv2u8)
+    (nxv4i8, nxv4u8)
+    (nxv8i8, nxv8u8)
+
+    (nxv2i16, nxv2u16)
+    (nxv4i16, nxv4u16)
+
+    (nxv2i32, nxv2u32)
+}
+
+impl_internal_sve_predicate! {
+    (svbool2_t, 2)
+    (svbool4_t, 4)
+    (svbool8_t, 8)
+}
+
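+// Unit-only enums mirroring the ACLE `svpattern` and `svprfop` arguments. They
+// derive `ConstParamTy` so that intrinsics can take them as const generic
+// parameters (see the `adt_const_params` note in lib.rs).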
+#[repr(i32)]
+#[allow(non_camel_case_types)]
+#[derive(Clone, Copy, PartialEq, Eq, ConstParamTy)]
+#[non_exhaustive]
+pub enum svpattern {
+    SV_POW2 = 0,
+    SV_VL1 = 1,
+    SV_VL2 = 2,
+    SV_VL3 = 3,
+    SV_VL4 = 4,
+    SV_VL5 = 5,
+    SV_VL6 = 6,
+    SV_VL7 = 7,
+    SV_VL8 = 8,
+    SV_VL16 = 9,
+    SV_VL32 = 10,
+    SV_VL64 = 11,
+    SV_VL128 = 12,
+    SV_VL256 = 13,
+    SV_MUL4 = 29,
+    SV_MUL3 = 30,
+    SV_ALL = 31,
+}
+
+#[repr(i32)]
+#[allow(non_camel_case_types)]
+#[derive(Clone, Copy, PartialEq, Eq, ConstParamTy)]
+#[non_exhaustive]
+pub enum svprfop {
+    SV_PLDL1KEEP = 0,
+    SV_PLDL1STRM = 1,
+    SV_PLDL2KEEP = 2,
+    SV_PLDL2STRM = 3,
+    SV_PLDL3KEEP = 4,
+    SV_PLDL3STRM = 5,
+    SV_PSTL1KEEP = 8,
+    SV_PSTL1STRM = 9,
+    SV_PSTL2KEEP = 10,
+    SV_PSTL2STRM = 11,
+    SV_PSTL3KEEP = 12,
+    SV_PSTL3STRM = 13,
+}
diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs
index bd4de67445..70ffcc75ec 100644
--- a/crates/core_arch/src/lib.rs
+++ b/crates/core_arch/src/lib.rs
@@ -3,11 +3,17 @@
 #![allow(dead_code)]
 #![allow(unused_features)]
 #![allow(internal_features)]
+// TODO: We need this for adt_const_params, which is currently marked as incomplete. Our usage is
+// very simple (passing unit-only enums to SVE intrinsics), and the user benefit of passing
+// symbolic values is significant. However, we should remove this allow() once adt_const_params is
+// completed, or consider using plain integers if more problems are found.
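+// As an illustrative sketch (not necessarily the exact signature used in this crate),
+// this lets an intrinsic take the operation symbolically, e.g.
+// `fn svprfb<const OP: svprfop>(..)` called as `svprfb::<{ svprfop::SV_PLDL1KEEP }>(..)`,
+// rather than as a bare integer immediate.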
+#![allow(incomplete_features)]
 #![deny(rust_2018_idioms)]
 #![feature(
     custom_inner_attributes,
     link_llvm_intrinsics,
     platform_intrinsics,
+    repr_scalable,
     repr_simd,
     simd_ffi,
     proc_macro_hygiene,
@@ -33,9 +39,14 @@
     asm_const,
     target_feature_11,
     inline_const,
-    generic_arg_infer
+    generic_arg_infer,
+    adt_const_params,
+    unsized_fn_params,
+    unsized_locals,
+    unchecked_math,
+    unchecked_shifts
 )]
-#![cfg_attr(test, feature(test, abi_vectorcall))]
+#![cfg_attr(test, feature(test, abi_vectorcall, lazy_cell))]
 #![deny(clippy::missing_inline_in_public_items)]
 #![allow(
     clippy::identity_op,
diff --git a/crates/core_arch/src/macros.rs b/crates/core_arch/src/macros.rs
index 56d922b0fd..25d7cd8df7 100644
--- a/crates/core_arch/src/macros.rs
+++ b/crates/core_arch/src/macros.rs
@@ -48,6 +48,22 @@ macro_rules! static_assert_simm_bits {
     };
 }
 
+#[allow(unused_macros)]
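+// Asserts at compile time that the const generic `$imm` lies in the inclusive
+// range `$min..=$max`; the SVE intrinsics use this to validate immediates such
+// as rotate amounts.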
+macro_rules! static_assert_range {
+    ($imm:ident, $min:literal, $max:literal) => {
+        static_assert!(
+            $min <= $imm && $imm <= $max,
+            concat!(
+                stringify!($imm),
+                " is not in range ",
+                stringify!($min),
+                "-",
+                stringify!($max),
+            )
+        )
+    };
+}
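+
+// Illustrative usage, assuming a const generic `IMM_ROTATION` is in scope:
+//     static_assert_range!(IMM_ROTATION, 0, 270);
+// compiles only when 0 <= IMM_ROTATION <= 270.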
+
 #[allow(unused)]
 macro_rules! types {
     ($(
diff --git a/crates/core_arch/src/simd_llvm.rs b/crates/core_arch/src/simd_llvm.rs
index decdecaaf4..1eca13621a 100644
--- a/crates/core_arch/src/simd_llvm.rs
+++ b/crates/core_arch/src/simd_llvm.rs
@@ -18,7 +18,8 @@ extern "platform-intrinsic" {
     //pub fn simd_select
     pub fn simd_bitmask<T, U>(x: T) -> U;
 
-    pub fn simd_cast<T, U>(x: T) -> U;
+    pub fn simd_cast<T: ?Sized, U: ?Sized>(x: T) -> U;
+    pub fn simd_reinterpret<Src: ?Sized, Dst: ?Sized>(src: Src) -> Dst;
 
     pub fn simd_add<T>(x: T, y: T) -> T;
     pub fn simd_sub<T>(x: T, y: T) -> T;
@@ -52,7 +53,7 @@ extern "platform-intrinsic" {
     pub fn simd_reduce_all<T>(x: T) -> bool;
     pub fn simd_reduce_any<T>(x: T) -> bool;
 
-    pub fn simd_select<M, T>(m: M, a: T, b: T) -> T;
+    pub fn simd_select<M: ?Sized, T: ?Sized>(m: M, a: T, b: T) -> T;
     pub fn simd_select_bitmask<M, T>(m: M, a: T, b: T) -> T;
 
     pub fn simd_fmin<T>(a: T, b: T) -> T;

From 01043029d2ecf6889190b59135b73dd10fe1a87f Mon Sep 17 00:00:00 2001
From: James McGregor <james.mcgregor2@arm.com>
Date: Mon, 11 Dec 2023 15:51:21 +0000
Subject: [PATCH 4/6] Add SVE support to the intrinsic test tool

Co-authored-by: Jamie Cunliffe <Jamie.Cunliffe@arm.com>
Co-authored-by: Adam Gemmell <adam.gemmell@arm.com>
Co-authored-by: Jacob Bramley <jacob.bramley@arm.com>
---
 .../aarch64-unknown-linux-gnu/Dockerfile      |   2 +-
 ci/run.sh                                     |   5 +
 crates/intrinsic-test/README.md               |   1 +
 crates/intrinsic-test/missing_aarch64.txt     |  41 ++
 crates/intrinsic-test/src/argument.rs         | 276 ++++++++---
 crates/intrinsic-test/src/intrinsic.rs        | 402 +++++++++++----
 crates/intrinsic-test/src/json_parser.rs      |  78 ++-
 crates/intrinsic-test/src/main.rs             | 461 +++++++++++++-----
 crates/intrinsic-test/src/types.rs            | 434 ++++++++++++-----
 crates/intrinsic-test/src/values.rs           |  57 ++-
 10 files changed, 1348 insertions(+), 409 deletions(-)

diff --git a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
index 8f4aba45c3..87cec1e394 100644
--- a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
+++ b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
@@ -14,5 +14,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
   lld
 
 ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \
-    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64 -L /usr/aarch64-linux-gnu" \
+    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64 -cpu max,sve512=on -L /usr/aarch64-linux-gnu" \
     OBJDUMP=aarch64-linux-gnu-objdump
diff --git a/ci/run.sh b/ci/run.sh
index a13e5963f8..3b4442c29c 100755
--- a/ci/run.sh
+++ b/ci/run.sh
@@ -148,11 +148,16 @@ case ${TARGET} in
 esac
 
 if [ "${TARGET}" = "aarch64-unknown-linux-gnu" ]; then
+
     (
         CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" \
             RUSTFLAGS="$HOST_RUSTFLAGS" \
             RUST_LOG=warn \
             cargo run ${INTRINSIC_TEST} --release --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" --cppcompiler "clang++-15" --skip crates/intrinsic-test/missing_aarch64.txt
+        CPPFLAGS="-fuse-ld=lld -I/usr/aarch64-linux-gnu/include/ -I/usr/aarch64-linux-gnu/include/c++/9/aarch64-linux-gnu/" \
+            RUSTFLAGS="$HOST_RUSTFLAGS" \
+            RUST_LOG=warn \
+            cargo run ${INTRINSIC_TEST} --release --bin intrinsic-test -- intrinsics_data/arm_intrinsics.json --runner "${CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER}" --cppcompiler "clang++-15" --skip crates/intrinsic-test/missing_aarch64.txt --sve
     )
 elif [ "${TARGET}" = "armv7-unknown-linux-gnueabihf" ]; then
     (
diff --git a/crates/intrinsic-test/README.md b/crates/intrinsic-test/README.md
index 260d59fca8..5c8e19fa29 100644
--- a/crates/intrinsic-test/README.md
+++ b/crates/intrinsic-test/README.md
@@ -9,6 +9,7 @@ USAGE:
 FLAGS:
         --a32              Run tests for A32 instrinsics instead of A64
         --generate-only    Regenerate test programs, but don't build or run them
+        --sve              Run tests for SVE instead of Neon
     -h, --help             Prints help information
     -V, --version          Prints version information
 
diff --git a/crates/intrinsic-test/missing_aarch64.txt b/crates/intrinsic-test/missing_aarch64.txt
index 33b7425d7c..1731e360f7 100644
--- a/crates/intrinsic-test/missing_aarch64.txt
+++ b/crates/intrinsic-test/missing_aarch64.txt
@@ -39,3 +39,44 @@ vrnd64z_f64
 #vqshluq_n_s8
 #vqshlus_n_s32
 
+# Not valid to compare these
+svundef_u8
+svundef_u16
+svundef_u32
+svundef_u64
+svundef_f32
+svundef_f64
+svundef_s8
+svundef_s16
+svundef_s32
+svundef_s64
+svundef2_u8
+svundef2_u16
+svundef2_u32
+svundef2_u64
+svundef2_f32
+svundef2_f64
+svundef2_s8
+svundef2_s16
+svundef2_s32
+svundef2_s64
+svundef3_u8
+svundef3_u16
+svundef3_u32
+svundef3_u64
+svundef3_f32
+svundef3_f64
+svundef3_s8
+svundef3_s16
+svundef3_s32
+svundef3_s64
+svundef4_u8
+svundef4_u16
+svundef4_u32
+svundef4_u64
+svundef4_f32
+svundef4_f64
+svundef4_s8
+svundef4_s16
+svundef4_s32
+svundef4_s64
diff --git a/crates/intrinsic-test/src/argument.rs b/crates/intrinsic-test/src/argument.rs
index e80760ca3a..54a296adec 100644
--- a/crates/intrinsic-test/src/argument.rs
+++ b/crates/intrinsic-test/src/argument.rs
@@ -1,9 +1,13 @@
+use std::iter::Iterator;
 use std::ops::Range;
 
 use crate::format::Indentation;
 use crate::json_parser::ArgPrep;
-use crate::types::{IntrinsicType, TypeKind};
-use crate::Language;
+use crate::types::{IntrinsicType, TypeKind, VecLen};
+use crate::values::{MAX_SVE_BITS, PRED_PATTERNS, SVE_GRANULE_BITS};
+use crate::{Extension, Language};
+
+use itertools::Itertools;
 
 /// An argument for the intrinsic.
 #[derive(Debug, PartialEq, Clone)]
@@ -22,6 +26,10 @@ pub struct Argument {
 pub enum Constraint {
     Equal(i64),
     Range(Range<i64>),
+    Svpattern,
+    Svprfop,
+    ImmRotation,
+    ImmRotationAdd,
 }
 
 impl TryFrom<ArgPrep> for Constraint {
@@ -45,10 +53,14 @@ impl TryFrom<ArgPrep> for Constraint {
 }
 
 impl Constraint {
-    pub fn to_range(&self) -> Range<i64> {
+    pub fn iter(&self) -> Box<dyn Iterator<Item = i64>> {
         match self {
-            Constraint::Equal(eq) => *eq..*eq + 1,
-            Constraint::Range(range) => range.clone(),
+            Constraint::Equal(eq) => Box::new(std::iter::once(*eq)),
+            Constraint::Range(range) => Box::new(range.clone()),
+            Constraint::Svpattern => Box::new((0..14).chain(29..32)),
+            Constraint::Svprfop => Box::new((0..6).chain(8..14)),
+            Constraint::ImmRotation => Box::new((0..271).step_by(90)),
+            Constraint::ImmRotationAdd => Box::new((90..271).step_by(180)),
         }
     }
 }
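+
+// Illustrative expansions of the iterators above (derived from the ranges they wrap):
+//   Constraint::Svpattern      -> 0..=13 and 29..=31 (the defined svpattern values)
+//   Constraint::Svprfop        -> 0..=5 and 8..=13 (the defined svprfop values)
+//   Constraint::ImmRotation    -> 0, 90, 180, 270
+//   Constraint::ImmRotationAdd -> 90, 270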
@@ -66,6 +78,16 @@ impl Argument {
         self.ty.is_ptr()
     }
 
+    pub fn is_predicate(&self) -> bool {
+        self.ty.is_predicate()
+    }
+
+    // Values for predicates, bools and immediates aren't loaded from a "populate_random" array;
+    // instead, a new block is generated for each preset value
+    pub fn uses_set_values(&self) -> bool {
+        self.has_constraint() || self.ty.kind() == TypeKind::Bool
+    }
+
     pub fn has_constraint(&self) -> bool {
         !self.constraints.is_empty()
     }
@@ -81,10 +103,31 @@ impl Argument {
     pub fn from_c(pos: usize, arg: &str, arg_prep: Option<ArgPrep>) -> Argument {
         let (ty, var_name) = Self::type_and_name_from_c(arg);
 
-        let ty = IntrinsicType::from_c(ty)
+        let mut ty = IntrinsicType::from_c(ty)
             .unwrap_or_else(|_| panic!("Failed to parse argument '{arg}'"));
 
-        let constraint = arg_prep.and_then(|a| a.try_into().ok());
+        if ty.is_predicate() {
+            if let Some(ap) = arg_prep.as_ref() {
+                let bit_len = ap.get_element_size().unwrap_or_else(|e| panic!("{e}"));
+                ty.set_inner_size(bit_len);
+            } else {
+                // Assume 8-bit lanes
+                // For example, svptest_* allow any length of predicate
+                ty.set_inner_size(8);
+            }
+        }
+
+        let constraint = arg_prep.and_then(|a| a.try_into().ok()).or_else(|| {
+            if ty.kind() == TypeKind::SvPattern {
+                Some(Constraint::Svpattern)
+            } else if ty.kind() == TypeKind::SvPrefetchOp {
+                Some(Constraint::Svprfop)
+            } else if var_name == "imm_rotation" {
+                Some(Constraint::ImmRotation)
+            } else {
+                None
+            }
+        });
 
         Argument {
             pos,
@@ -103,7 +146,8 @@ impl Argument {
                 kind: Int | UInt | Poly,
                 ..
             } => true,
-            _ => unimplemented!(),
+            IntrinsicType::Ptr { .. } => true,
+            ref ty => unimplemented!("{:#?}", ty),
         }
     }
 
@@ -124,6 +168,33 @@ impl Argument {
             format!("{}_vals", self.name.to_lowercase())
         }
     }
+
+    /// Returns a vector of predication setup statements for this argument
+    pub fn get_predicate_decls(&self, indentation: Indentation, language: Language) -> Vec<String> {
+        assert!(self.is_predicate());
+        let psize = self.ty.inner_size();
+        let (bind, open, close) = if let Language::Rust = language {
+            ("let ", "unsafe {", "}")
+        } else {
+            ("", "", "")
+        };
+
+        PRED_PATTERNS
+            .iter()
+            .map(|pat| {
+                let pat_string = pat
+                    .iter()
+                    .take((SVE_GRANULE_BITS / psize) as usize)
+                    .map(|b| b.to_string())
+                    .join(", ");
+
+                format!(
+                    "{indentation}{bind}{} = {open}svdupq_n_b{psize}({pat_string}){close};",
+                    self.name
+                )
+            })
+            .collect()
+    }
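+
+    // Illustrative output for a 32-bit predicate argument `pg` in Rust, assuming a 128-bit
+    // granule and a hypothetical PRED_PATTERNS entry of [true, false, true, false, ...]:
+    //     let pg = unsafe {svdupq_n_b32(true, false, true, false)};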
 }
 
 #[derive(Debug, PartialEq, Clone)]
@@ -133,18 +204,9 @@ pub struct ArgumentList {
 
 impl ArgumentList {
     /// Converts the argument list into the call parameters for a C function call.
-    /// e.g. this would generate something like `a, &b, c`
+    /// e.g. this would generate something like `a, b, c`
     pub fn as_call_param_c(&self) -> String {
-        self.args
-            .iter()
-            .map(|arg| match arg.ty {
-                IntrinsicType::Ptr { .. } => {
-                    format!("&{}", arg.name)
-                }
-                IntrinsicType::Type { .. } => arg.name.clone(),
-            })
-            .collect::<Vec<String>>()
-            .join(", ")
+        self.args.iter().map(|arg| &arg.name).join(", ")
     }
 
     /// Converts the argument list into the call parameters for a Rust function.
@@ -153,8 +215,7 @@ impl ArgumentList {
         self.args
             .iter()
             .filter(|a| !a.has_constraint())
-            .map(|arg| arg.name.clone())
-            .collect::<Vec<String>>()
+            .map(|arg| arg.name.to_string())
             .join(", ")
     }
 
@@ -163,7 +224,6 @@ impl ArgumentList {
             .iter()
             .filter(|a| a.has_constraint())
             .map(|arg| arg.name.clone())
-            .collect::<Vec<String>>()
             .join(", ")
     }
 
@@ -172,17 +232,22 @@ impl ArgumentList {
     /// e.g `const int32x2_t a_vals = {0x3effffff, 0x3effffff, 0x3f7fffff}`, if loads=2.
     pub fn gen_arglists_c(&self, indentation: Indentation, loads: u32) -> String {
         self.iter()
+            .filter(|arg| !arg.uses_set_values())
             .filter_map(|arg| {
+                let ty = if arg.is_ptr() {
+                    "uintptr_t".to_string()
+                } else {
+                    arg.ty.c_scalar_type()
+                };
+
                 (!arg.has_constraint()).then(|| {
                     format!(
                         "{indentation}const {ty} {name}_vals[] = {values};",
-                        ty = arg.ty.c_scalar_type(),
                         name = arg.name,
                         values = arg.ty.populate_random(indentation, loads, &Language::C)
                     )
                 })
             })
-            .collect::<Vec<_>>()
             .join("\n")
     }
 
@@ -190,82 +255,145 @@ impl ArgumentList {
     /// values can be loaded as a sliding window, e.g `const A_VALS: [u32; 20]  = [...];`
     pub fn gen_arglists_rust(&self, indentation: Indentation, loads: u32) -> String {
         self.iter()
+            .filter(|arg| !arg.uses_set_values())
             .filter_map(|arg| {
                 (!arg.has_constraint()).then(|| {
+                    let vlen = arg.ty.num_lanes().map_or(1, |v| {
+                        if let VecLen::Fixed(n) = v {
+                            n
+                        } else {
+                            MAX_SVE_BITS / arg.ty.inner_size()
+                        }
+                    });
+                    let load_size = vlen * arg.ty.num_vectors() + loads - 1;
+
+                    let ty = if arg.is_ptr() {
+                        "usize".to_string()
+                    } else {
+                        arg.ty.rust_scalar_type()
+                    };
                     format!(
                         "{indentation}{bind} {name}: [{ty}; {load_size}] = {values};",
                         bind = arg.rust_vals_array_binding(),
                         name = arg.rust_vals_array_name(),
-                        ty = arg.ty.rust_scalar_type(),
-                        load_size = arg.ty.num_lanes() * arg.ty.num_vectors() + loads - 1,
                         values = arg.ty.populate_random(indentation, loads, &Language::Rust)
                     )
                 })
             })
-            .collect::<Vec<_>>()
             .join("\n")
     }
 
-    /// Creates a line for each argument that initializes the argument from an array `[arg]_vals` at
-    /// an offset `i` using a load intrinsic, in C.
-    /// e.g `uint8x8_t a = vld1_u8(&a_vals[i]);`
-    pub fn load_values_c(&self, indentation: Indentation, p64_armv7_workaround: bool) -> String {
-        self.iter()
-            .filter_map(|arg| {
-                // The ACLE doesn't support 64-bit polynomial loads on Armv7
-                // This and the cast are a workaround for this
-                let armv7_p64 = if let TypeKind::Poly = arg.ty.kind() {
-                    p64_armv7_workaround
-                } else {
-                    false
-                };
-
-                (!arg.has_constraint()).then(|| {
-                    format!(
-                        "{indentation}{ty} {name} = {open_cast}{load}(&{name}_vals[i]){close_cast};\n",
-                        ty = arg.to_c_type(),
-                        name = arg.name,
-                        load = if arg.is_simd() {
-                            arg.ty.get_load_function(p64_armv7_workaround)
+    /// Creates a line for each argument that initializes the argument from an array `[arg]_vals`
+    /// at an offset `i` using a load intrinsic, e.g. `uint8x8_t a = vld1_u8(&a_vals[i]);`
+    pub fn load_values_c(
+        &self,
+        indentation: Indentation,
+        mode: Extension,
+        is_aarch32: bool,
+    ) -> String {
+        if let Extension::SVE = mode {
+            self.iter()
+                .filter_map(|arg| {
+                    (!arg.uses_set_values()).then(|| {
+                        if arg.is_simd() {
+                            format!(
+                                "{indentation}{ty} {name} = {load}(svptrue_b{psize}(), &{name}_vals[i]);",
+                                psize = arg.ty.inner_size(),
+                                ty = arg.to_c_type(),
+                                name = arg.name,
+                                load = arg.ty.get_load_function_sve()
+                            )
                         } else {
-                            "*".to_string()
-                        },
-                        open_cast = if armv7_p64 {
-                            format!("cast<{}>(", arg.to_c_type())
-                        } else {
-                            "".to_string()
-                        },
-                        close_cast = if armv7_p64 {
-                            ")".to_string()
+                            format!(
+                                "{indentation}{ty} {name} = {cast}{name}_vals[i];",
+                                ty = arg.to_c_type(),
+                                name = arg.name,
+                                cast = if arg.is_ptr() {
+                                    format!("({})", arg.to_c_type())
+                                } else {
+                                    String::new()
+                                },
+                            )
+                        }
+                    })
+                })
+                .join("\n")
+        } else {
+            self.iter()
+                .filter_map(|arg| {
+                    // The ACLE doesn't support 64-bit polynomial loads on Armv7
+                    // This and the cast are a workaround for this
+                    let armv7_p64 = if arg.ty.is_p64() { is_aarch32 } else { false };
+
+                    let (open_cast, close_cast) = if armv7_p64 {
+                        (format!("cast<{}>(", arg.to_c_type()), ")")
+                    } else {
+                        ("".to_string(), "")
+                    };
+
+                    (!arg.uses_set_values()).then(|| {
+                        if arg.is_simd() {
+                            format!(
+                                "{indentation}{ty} {name} = {open_cast}{load}(&{name}_vals[i]){close_cast};",
+                                ty = arg.to_c_type(),
+                                name = arg.name,
+                                load = arg.ty.get_load_function(is_aarch32),
+                                open_cast = open_cast,
+                                close_cast = close_cast
+                            )
                         } else {
-                            "".to_string()
+                            format!(
+                                "{indentation}{ty} {name} = {open_cast} {name}_vals[i] {close_cast};",
+                                ty = arg.to_c_type(),
+                                name = arg.name,
+                                open_cast = open_cast,
+                                close_cast = close_cast
+                            )
                         }
-                    )
+                    })
                 })
-            })
-            .collect()
+                .join("\n")
+        }
     }
 
     /// Creates a line for each argument that initializes the argument from array `[ARG]_VALS` at
     /// an offset `i` using a load intrinsic, in Rust.
     /// e.g `let a = vld1_u8(A_VALS.as_ptr().offset(i));`
-    pub fn load_values_rust(&self, indentation: Indentation) -> String {
+    pub fn load_values_rust(&self, indentation: Indentation, mode: Extension) -> String {
         self.iter()
             .filter_map(|arg| {
-                (!arg.has_constraint()).then(|| {
-                    format!(
-                        "{indentation}let {name} = {load}({vals_name}.as_ptr().offset(i));\n",
-                        name = arg.name,
-                        vals_name = arg.rust_vals_array_name(),
-                        load = if arg.is_simd() {
-                            arg.ty.get_load_function(false)
-                        } else {
-                            "*".to_string()
-                        },
-                    )
+                (!arg.uses_set_values()).then(|| {
+                    if arg.is_simd() {
+                        format!(
+                            "{indentation}let {name} = {load}({predicate}{array_name}.as_ptr().offset(i));",
+                            name = arg.name,
+                            array_name = arg.rust_vals_array_name(),
+                            load = if let Extension::SVE = mode {
+                                arg.ty.get_load_function_sve()
+                            } else {
+                                arg.ty.get_load_function(false)
+                            },
+                            predicate = if let Extension::SVE = mode {
+                                format!("svptrue_b{}(), ", arg.ty.inner_size())
+                            } else {
+                                "".to_string()
+                            }
+                        )
+                    } else {
+                        format!(
+                            "{indentation}let {name} = {array_name}[i as usize]{cast};",
+                            name = arg.name,
+                            array_name = arg.rust_vals_array_name(),
+                            cast = if arg.is_ptr() {
+                                format!(" as *const {}", arg.ty.rust_scalar_type())
+                            } else {
+                                String::new()
+                            },
+                        )
+                    }
                 })
             })
-            .collect()
+            .join("\n")
     }
 
     pub fn iter(&self) -> std::slice::Iter<'_, Argument> {
diff --git a/crates/intrinsic-test/src/intrinsic.rs b/crates/intrinsic-test/src/intrinsic.rs
index b83c371ea4..6866e8e6af 100644
--- a/crates/intrinsic-test/src/intrinsic.rs
+++ b/crates/intrinsic-test/src/intrinsic.rs
@@ -1,8 +1,20 @@
-use crate::format::Indentation;
-use crate::types::{IntrinsicType, TypeKind};
+use crate::{
+    format::Indentation,
+    types::{IntrinsicType, TypeKind, VecLen},
+    values::MAX_SVE_BITS,
+    Extension, Language,
+};
 
 use super::argument::ArgumentList;
 
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum Predication {
+    None,
+    Merging,
+    Zeroing,
+    DontCare,
+}
+
 /// An intrinsic
 #[derive(Debug, PartialEq, Clone)]
 pub struct Intrinsic {
@@ -17,72 +29,263 @@ pub struct Intrinsic {
 
     /// Whether this intrinsic is only available on A64.
     pub a64_only: bool,
+
+    /// The type of predication (if any) this intrinsic uses.
+    pub predication: Predication,
 }
 
 impl Intrinsic {
-    /// Generates a std::cout for the intrinsics results that will match the
-    /// rust debug output format for the return type. The generated line assumes
-    /// there is an int i in scope which is the current pass number.
-    pub fn print_result_c(&self, indentation: Indentation, additional: &str) -> String {
-        let lanes = if self.results.num_vectors() > 1 {
-            (0..self.results.num_vectors())
-                .map(|vector| {
-                    format!(
-                        r#""{ty}(" << {lanes} << ")""#,
-                        ty = self.results.c_single_vector_type(),
-                        lanes = (0..self.results.num_lanes())
-                            .map(move |idx| -> std::string::String {
-                                format!(
-                                    "{cast}{lane_fn}(__return_value.val[{vector}], {lane})",
-                                    cast = self.results.c_promotion(),
-                                    lane_fn = self.results.get_lane_function(),
-                                    lane = idx,
-                                    vector = vector,
-                                )
-                            })
-                            .collect::<Vec<_>>()
-                            .join(r#" << ", " << "#)
-                    )
-                })
-                .collect::<Vec<_>>()
-                .join(r#" << ", " << "#)
-        } else if self.results.num_lanes() > 1 {
-            (0..self.results.num_lanes())
-                .map(|idx| -> std::string::String {
-                    format!(
-                        "{cast}{lane_fn}(__return_value, {lane})",
-                        cast = self.results.c_promotion(),
-                        lane_fn = self.results.get_lane_function(),
-                        lane = idx
-                    )
-                })
-                .collect::<Vec<_>>()
-                .join(r#" << ", " << "#)
+    pub fn print_results_c(
+        &self,
+        indentation: Indentation,
+        var_name: &str,
+        context: &str,
+    ) -> String {
+        let open = format!(
+            r#"std::cout << std::boolalpha << "Result{context} "{iter} << ": {ty}" << std::fixed << std::setprecision(150);"#,
+            iter = if self.arguments.iter().any(|a| !a.uses_set_values()) {
+                " << i+1"
+            } else {
+                ""
+            },
+            ty = if self.results.is_simd() {
+                format!("{}(", self.results.c_type())
+            } else {
+                "".to_string()
+            }
+        );
+
+        let close = format!(
+            r#"std::cout << "{brace}" << std::endl;"#,
+            brace = if self.results.is_simd() { ")" } else { "" },
+        );
+        let indentation_1 = indentation.nested();
+        let indentation_2 = indentation_1.nested();
+        format!(
+            r#"{indentation}{open}
+{indentation}for (int j=0; j<element_count; j++) {{
+{indentation_1}std::cout << {cast}{var_name}[j];
+{indentation_1}if (j < element_count-1) {{
+{indentation_2}std::cout << ", ";
+{indentation_1}}}
+{indentation}}}
+{indentation}{close}"#,
+            open = open,
+            close = close,
+            var_name = var_name,
+            cast = self.results.c_promotion()
+        )
+    }
+
+    pub fn print_results_rust(&self, indentation: Indentation, context: &str) -> String {
+        let open = format!(
+            r#"print!("Result{context} {{}}: {ty}", {iter});"#,
+            iter = if self.arguments.iter().any(|a| !a.uses_set_values()) {
+                "i+1"
+            } else {
+                "\"\""
+            },
+            ty = if self.results.is_simd() {
+                format!("{}(", self.results.rust_type())
+            } else {
+                "".to_string()
+            },
+        );
+
+        let close = format!(
+            r#"println!("{brace}")"#,
+            brace = if self.results.is_simd() { ")" } else { "" },
+        );
+
+        let bool_cast = if self.results.kind() == TypeKind::Bool && !self.results.is_predicate() {
+            // Match C's bool printing behaviour
+            " as i32"
         } else {
+            ""
+        };
+
+        let indentation_1 = indentation.nested();
+        let indentation_2 = indentation_1.nested();
+        format!(
+            r#"{indentation}{open}
+{indentation}for j in 0..element_count {{
+{indentation_1}print!("{{:.150?}}", results_array[j as usize]{bool_cast});
+{indentation_1}if j < element_count-1 {{
+{indentation_2}print!(", ");
+{indentation_1}}}
+{indentation}}}
+{indentation}{close}"#,
+        )
+    }
+
+    pub fn gen_results_array_c(&self, indentation: Indentation) -> String {
+        let ty = if self.results.is_predicate() {
+            // We'll convert predicates to ints later
+            format!("int{}_t", self.results.inner_size())
+        } else {
+            self.results.c_scalar_type()
+        };
+
+        format!(
+            "{indentation}{ty} results_array[{size}] = {{0}};",
+            size = if self.results.is_simd() {
+                match self.results.num_lanes().unwrap() {
+                    // If an SVE vector is returned, assume the largest possible vector size
+                    VecLen::Scalable => {
+                        (MAX_SVE_BITS / self.results.inner_size()) * self.results.num_vectors()
+                    }
+                    VecLen::Fixed(n) => n * self.results.num_vectors(),
+                }
+            } else {
+                1
+            }
+        )
+    }
+
+    pub fn gen_results_array_rust(&self, indentation: Indentation) -> String {
+        let ty = if self.results.is_predicate() {
+            // Predicates are converted to ints
+            format!("i{}", self.results.inner_size())
+        } else {
+            self.results.rust_scalar_type()
+        };
+        format!(
+            "{indentation}let mut results_array: [{ty}; {size}] = [Default::default(); {size}];",
+            size = if self.results.is_simd() {
+                match self.results.num_lanes().unwrap() {
+                    // If an SVE vector is returned, assume the largest possible vector size
+                    VecLen::Scalable => {
+                        (MAX_SVE_BITS / self.results.inner_size()) * self.results.num_vectors()
+                    }
+                    VecLen::Fixed(n) => n * self.results.num_vectors(),
+                }
+            } else {
+                1
+            }
+        )
+    }
+
+    /// Returns a line which stores the result of this intrinsic to the array `results_array`, in
+    /// the provided language,
+    /// e.g. `svst1_s32(svptrue_b32(), results_array.as_mut_ptr(), __return_value);` for intrinsic
+    /// svadd_s32_z and language Rust.
+    fn store_result(
+        &self,
+        indentation: Indentation,
+        language: Language,
+        is_aarch32: bool,
+    ) -> String {
+        let results = &self.results;
+        if results.is_simd() {
+            let arg_result = if results.is_predicate() {
+                format!("svdup_n_s{}_z(__return_value, 1)", results.inner_size())
+            } else if results.is_p64() && is_aarch32 && language == Language::C {
+                "cast<int64x1_t>(__return_value)".to_string()
+            } else {
+                "__return_value".to_string()
+            };
+
             format!(
-                "{promote}cast<{cast}>(__return_value)",
-                cast = match self.results.kind() {
-                    TypeKind::Float if self.results.inner_size() == 32 => "float".to_string(),
-                    TypeKind::Float if self.results.inner_size() == 64 => "double".to_string(),
-                    TypeKind::Int => format!("int{}_t", self.results.inner_size()),
-                    TypeKind::UInt => format!("uint{}_t", self.results.inner_size()),
-                    TypeKind::Poly => format!("poly{}_t", self.results.inner_size()),
-                    ty => todo!("print_result_c - Unknown type: {:#?}", ty),
+                "{indentation}{store}({predicate}{arg_array}, {arg_result});",
+                store = results.get_store_function(language == Language::C && is_aarch32),
+                predicate = if self.predication == Predication::DontCare {
+                    "pg, ".to_string()
+                } else if results.is_scalable() {
+                    format!("svptrue_b{}(), ", results.inner_size())
+                } else {
+                    "".to_string()
+                },
+                arg_array = if language == Language::C {
+                    "results_array"
+                } else {
+                    "results_array.as_mut_ptr()"
                 },
-                promote = self.results.c_promotion(),
             )
-        };
+        } else {
+            format!("{indentation}results_array[0] = __return_value;")
+        }
+    }
 
+    /// Returns a line which stores the number of elements in one intrinsic result in a variable
+    /// named `element_count`. For Neon this will be a fixed number; for SVE it will be either
+    /// a fixed number or a call to one of the `svcnt` intrinsics (possibly multiplied by some
+    /// factor, if multiple vectors are returned).
+    pub fn gen_element_count_c(&self, indentation: Indentation, language: Language) -> String {
         format!(
-            r#"{indentation}std::cout << "Result {additional}-" << i+1 << ": {ty}" << std::fixed << std::setprecision(150) <<  {lanes} << "{close}" << std::endl;"#,
-            ty = if self.results.is_simd() {
-                format!("{}(", self.results.c_type())
+            "{indentation}{rtype} element_count = {call};",
+            rtype = if let Language::Rust = language {
+                "let"
             } else {
-                String::from("")
+                "uint64_t"
             },
-            close = if self.results.is_simd() { ")" } else { "" },
-            lanes = lanes,
-            additional = additional,
+            call = match self.results {
+                IntrinsicType::Type {
+                    bit_len: Some(bit_len),
+                    simd_len: Some(VecLen::Scalable),
+                    vec_len,
+                    ..
+                } => format!(
+                    "{ropen} {num_vectors} * svcnt{size}() {rclose}",
+                    num_vectors = vec_len.unwrap_or(1),
+                    size = match bit_len {
+                        64 => "d",
+                        32 => "w",
+                        16 => "h",
+                        8 => "b",
+                        _ => unreachable!("non-SVE result bit-length"),
+                    },
+                    ropen = if let Language::Rust = language {
+                        "unsafe {"
+                    } else {
+                        ""
+                    },
+                    rclose = if let Language::Rust = language {
+                        "}"
+                    } else {
+                        ""
+                    }
+                ),
+                IntrinsicType::Type {
+                    simd_len: Some(VecLen::Fixed(sl)),
+                    vec_len,
+                    ..
+                } => format!("{}", vec_len.unwrap_or(1) * sl),
+                IntrinsicType::Type {
+                    simd_len: None,
+                    vec_len: None,
+                    ..
+                } => "1".to_string(),
+                _ => unreachable!("Shouldn't be called on this type"),
+            }
+        )
+    }
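+
+    // Illustrative output (language = C; spacing simplified):
+    //   svint32_t   -> `uint64_t element_count = 1 * svcntw();`
+    //   svint64x2_t -> `uint64_t element_count = 2 * svcntd();`
+    //   int32x4_t   -> `uint64_t element_count = 4;`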
+
+    /// Returns a call to this intrinsic in the given language, storing the result in a variable
+    /// named `varname`. For Rust, any const-generic constraints are passed to the intrinsic
+    /// via turbofish syntax.
+    pub fn generate_call(
+        &self,
+        indentation: Indentation,
+        varname: &str,
+        language: Language,
+    ) -> String {
+        let constraints = self.arguments.as_constraint_parameters_rust();
+        let constraints = if !constraints.is_empty() {
+            format!("::<{constraints}>")
+        } else {
+            constraints
+        };
+
+        let (decl_var, constraints, args) = match language {
+            Language::Rust => ("let", constraints, self.arguments.as_call_param_rust()),
+            Language::C => ("auto", "".to_string(), self.arguments.as_call_param_c()),
+        };
+
+        format!(
+            "{indentation}{bind} {varname} = {intrinsic_call}{const}({args});",
+            bind = decl_var,
+            intrinsic_call = self.name,
+            const = constraints,
         )
     }
 
@@ -91,21 +294,31 @@ impl Intrinsic {
         indentation: Indentation,
         additional: &str,
         passes: u32,
-        p64_armv7_workaround: bool,
+        mode: Extension,
+        is_aarch32: bool,
     ) -> String {
-        let body_indentation = indentation.nested();
+        let block_indentation = indentation.nested();
+        let start = if self.arguments.iter().any(|a| !a.uses_set_values()) {
+            format!("{indentation}for (int i=0; i<{passes}; i++) {{")
+        } else {
+            format!("{indentation}{{")
+        };
+
         format!(
-            "{indentation}for (int i=0; i<{passes}; i++) {{\n\
-                {loaded_args}\
-                {body_indentation}auto __return_value = {intrinsic_call}({args});\n\
-                {print_result}\n\
-            {indentation}}}",
+            r#"{start}
+{loaded_args}
+{intrinsic_call}
+
+{store_result}
+{print_result}
+{indentation}}}
+"#,
             loaded_args = self
                 .arguments
-                .load_values_c(body_indentation, p64_armv7_workaround),
-            intrinsic_call = self.name,
-            args = self.arguments.as_call_param_c(),
-            print_result = self.print_result_c(body_indentation, additional)
+                .load_values_c(block_indentation, mode, is_aarch32),
+            intrinsic_call = self.generate_call(block_indentation, "__return_value", Language::C),
+            store_result = self.store_result(block_indentation, Language::C, is_aarch32),
+            print_result = self.print_results_c(block_indentation, "results_array", additional)
         )
     }
 
@@ -114,29 +327,42 @@ impl Intrinsic {
         indentation: Indentation,
         additional: &str,
         passes: u32,
+        mode: Extension,
+        is_aarch32: bool,
     ) -> String {
-        let constraints = self.arguments.as_constraint_parameters_rust();
-        let constraints = if !constraints.is_empty() {
-            format!("::<{constraints}>")
+        let block_indentation = if self.arguments.iter().any(|a| !a.uses_set_values()) {
+            indentation.nested()
         } else {
-            constraints
+            indentation
         };
 
-        let indentation2 = indentation.nested();
-        let indentation3 = indentation2.nested();
-        format!(
-            "{indentation}for i in 0..{passes} {{\n\
-                {indentation2}unsafe {{\n\
-                    {loaded_args}\
-                    {indentation3}let __return_value = {intrinsic_call}{const}({args});\n\
-                    {indentation3}println!(\"Result {additional}-{{}}: {{:.150?}}\", i + 1, __return_value);\n\
-                {indentation2}}}\n\
-            {indentation}}}",
-            loaded_args = self.arguments.load_values_rust(indentation3),
-            intrinsic_call = self.name,
-            const = constraints,
-            args = self.arguments.as_call_param_rust(),
-            additional = additional,
-        )
+        let mut block = format!(
+            r#"{block_indentation}unsafe {{
+{loaded_args}
+{intrinsic_call}
+
+{store_result}
+{print_result}
+{block_indentation}}}
+"#,
+            loaded_args = self
+                .arguments
+                .load_values_rust(block_indentation.nested(), mode),
+            intrinsic_call =
+                self.generate_call(block_indentation.nested(), "__return_value", Language::Rust),
+            store_result =
+                self.store_result(block_indentation.nested(), Language::Rust, is_aarch32),
+            print_result = self.print_results_rust(block_indentation.nested(), additional),
+        );
+
+        if self.arguments.iter().any(|a| !a.uses_set_values()) {
+            block = format!(
+                r#"{indentation}for i in 0..{passes} {{
+{block}
+{indentation}}}"#,
+            );
+        }
+
+        block
     }
 }
diff --git a/crates/intrinsic-test/src/json_parser.rs b/crates/intrinsic-test/src/json_parser.rs
index 8b3c7869c6..42149dc1e8 100644
--- a/crates/intrinsic-test/src/json_parser.rs
+++ b/crates/intrinsic-test/src/json_parser.rs
@@ -1,15 +1,17 @@
 use std::collections::HashMap;
 use std::path::Path;
 
+use regex::Regex;
 use serde::Deserialize;
 
-use crate::argument::{Argument, ArgumentList};
-use crate::intrinsic::Intrinsic;
+use crate::argument::{Argument, ArgumentList, Constraint};
+use crate::intrinsic::{Intrinsic, Predication};
 use crate::types::IntrinsicType;
 
 #[derive(Deserialize, Debug)]
 #[serde(deny_unknown_fields)]
 struct ReturnType {
+    element_bit_size: String,
     value: String,
 }
 
@@ -29,7 +31,42 @@ pub enum ArgPrep {
     Nothing {},
 }
 
+impl ArgPrep {
+    pub fn get_element_size(&self) -> Result<u32, String> {
+        // We only rely on argument preparation for the element size of predicates
+        // All other sizes are implicit in the type
+        lazy_static! {
+            // Syntax examples:
+            //   Pg.B
+            //   Pop2.S
+            //   Ptied.Q
+            static ref PREP_REGEX: Regex = Regex::new(r"^P[a-z]+\d?\.(B|H|S|D|Q)").unwrap();
+        }
+        match self {
+            ArgPrep::Register { reg } => {
+                if let Some(caps) = PREP_REGEX.captures(reg) {
+                    let size_kind = caps.get(1).unwrap().as_str();
+                    match size_kind {
+                        "B" => Ok(8),
+                        "H" => Ok(16),
+                        "S" => Ok(32),
+                        "D" => Ok(64),
+                        "Q" => Ok(128),
+                        _ => panic!("{size_kind}?"),
+                    }
+                } else {
+                    Err(format!("Couldn't get element size for register {reg}"))
+                }
+            }
+            _ => Err(format!(
+                "Couldn't get element size from argument preparation {self:?}"
+            )),
+        }
+    }
+}
+
 #[derive(Deserialize, Debug)]
+#[serde(deny_unknown_fields)]
 struct JsonIntrinsic {
     #[serde(rename = "SIMD_ISA")]
     simd_isa: String,
@@ -40,9 +77,23 @@ struct JsonIntrinsic {
     args_prep: Option<HashMap<String, ArgPrep>>,
     #[serde(rename = "Architectures")]
     architectures: Vec<String>,
+    #[serde(rename = "instructions")]
+    _ins: Option<Vec<Vec<String>>>,
 }
 
 pub fn get_neon_intrinsics(filename: &Path) -> Result<Vec<Intrinsic>, Box<dyn std::error::Error>> {
+    get_intrinsics(filename, false)
+}
+
+pub fn get_sve_intrinsics(filename: &Path) -> Result<Vec<Intrinsic>, Box<dyn std::error::Error>> {
+    get_intrinsics(filename, true)
+}
+
+fn get_intrinsics(
+    filename: &Path,
+    is_sve: bool,
+) -> Result<Vec<Intrinsic>, Box<dyn std::error::Error>> {
+    let arch = if is_sve { "sve" } else { "Neon" };
     let file = std::fs::File::open(filename)?;
     let reader = std::io::BufReader::new(file);
     let json: Vec<JsonIntrinsic> = serde_json::from_reader(reader).expect("Couldn't parse JSON");
@@ -50,7 +101,7 @@ pub fn get_neon_intrinsics(filename: &Path) -> Result<Vec<Intrinsic>, Box<dyn st
     let parsed = json
         .into_iter()
         .filter_map(|intr| {
-            if intr.simd_isa == "Neon" {
+            if intr.simd_isa.starts_with(arch) {
                 Some(json_to_intrinsic(intr).expect("Couldn't parse JSON"))
             } else {
                 None
@@ -63,7 +114,8 @@ pub fn get_neon_intrinsics(filename: &Path) -> Result<Vec<Intrinsic>, Box<dyn st
 fn json_to_intrinsic(mut intr: JsonIntrinsic) -> Result<Intrinsic, Box<dyn std::error::Error>> {
     let name = intr.name.replace(['[', ']'], "");
 
-    let results = IntrinsicType::from_c(&intr.return_type.value)?;
+    let mut results = IntrinsicType::from_c(&intr.return_type.value)?;
+    results.set_inner_size(intr.return_type.element_bit_size.parse::<u32>()?);
 
     let mut args_prep = intr.args_prep.as_mut();
     let args = intr
@@ -74,6 +126,7 @@ fn json_to_intrinsic(mut intr: JsonIntrinsic) -> Result<Intrinsic, Box<dyn std::
             let arg_name = Argument::type_and_name_from_c(&arg).1;
             let arg_prep = args_prep.as_mut().and_then(|a| a.remove(arg_name));
             let mut arg = Argument::from_c(i, &arg, arg_prep);
+
             // The JSON doesn't list immediates as const
             if let IntrinsicType::Type {
                 ref mut constant, ..
@@ -83,16 +136,33 @@ fn json_to_intrinsic(mut intr: JsonIntrinsic) -> Result<Intrinsic, Box<dyn std::
                     *constant = true
                 }
             }
+            if (name.starts_with("svcadd_") || name.starts_with("svqcadd_"))
+                && arg_name == "imm_rotation"
+            {
+                arg.constraints = vec![Constraint::ImmRotationAdd];
+            }
             arg
         })
         .collect();
 
     let arguments = ArgumentList { args };
 
+    let predication = if name.ends_with("_m") {
+        Predication::Merging
+    } else if name.ends_with("_x") {
+        Predication::DontCare
+    } else if name.ends_with("_z") || arguments.iter().any(|a| a.is_predicate() && a.name == "pg") {
+        // Predicated intrinsics with only a zeroing form typically lack a _z suffix
+        Predication::Zeroing
+    } else {
+        Predication::None
+    };
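+    // For example (illustrative): "svadd_s32_m" => Merging, "svabs_s32_x" => DontCare,
+    // "svadd_s32_z" => Zeroing, "svdup_n_s32" => None.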
+
     Ok(Intrinsic {
         name,
         arguments,
         results,
         a64_only: intr.architectures == vec!["A64".to_string()],
+        predication,
     })
 }
diff --git a/crates/intrinsic-test/src/main.rs b/crates/intrinsic-test/src/main.rs
index 00331e67c4..bbde7ec926 100644
--- a/crates/intrinsic-test/src/main.rs
+++ b/crates/intrinsic-test/src/main.rs
@@ -1,5 +1,7 @@
 #![feature(slice_partition_dedup)]
 #[macro_use]
+extern crate lazy_static;
+#[macro_use]
 extern crate log;
 
 use std::fs::File;
@@ -14,7 +16,7 @@ use types::TypeKind;
 
 use crate::argument::Argument;
 use crate::format::Indentation;
-use crate::json_parser::get_neon_intrinsics;
+use crate::json_parser::{get_neon_intrinsics, get_sve_intrinsics};
 
 mod argument;
 mod format;
@@ -23,52 +25,103 @@ mod json_parser;
 mod types;
 mod values;
 
-// The number of times each intrinsic will be called.
+// The number of times each intrinsic will be called per combination of constraint values (and,
+// for SVE intrinsics, per predicate pattern).
 const PASSES: u32 = 20;
 
-#[derive(Debug, PartialEq)]
+#[derive(Debug, Copy, Clone, PartialEq)]
 pub enum Language {
     Rust,
     C,
 }
 
+#[derive(Debug, Copy, Clone, PartialEq)]
+pub enum Extension {
+    NEON,
+    SVE,
+}
+
 fn gen_code_c(
     indentation: Indentation,
     intrinsic: &Intrinsic,
-    constraints: &[&Argument],
-    name: String,
-    p64_armv7_workaround: bool,
+    preset_vals: &[&Argument],
+    context: String,
+    mode: Extension,
+    is_aarch32: bool,
 ) -> String {
-    if let Some((current, constraints)) = constraints.split_last() {
-        let range = current
-            .constraints
-            .iter()
-            .map(|c| c.to_range())
-            .flat_map(|r| r.into_iter());
-
-        let body_indentation = indentation.nested();
-        range
-            .map(|i| {
-                format!(
-                    "{indentation}{{\n\
-                        {body_indentation}{ty} {name} = {val};\n\
-                        {pass}\n\
-                    {indentation}}}",
-                    name = current.name,
-                    ty = current.ty.c_type(),
-                    val = i,
-                    pass = gen_code_c(
-                        body_indentation,
-                        intrinsic,
-                        constraints,
-                        format!("{name}-{i}"),
-                        p64_armv7_workaround
+    if let Some((current, preset_vals)) = preset_vals.split_last() {
+        let name = &current.name;
+        if current.is_predicate() {
+            let passes = current
+                .get_predicate_decls(indentation.nested(), Language::C)
+                .into_iter()
+                .enumerate()
+                .map(|(i, p)| {
+                    format!(
+                        r#"{indentation}{{
+{p}
+{pass}
+{indentation}}}"#,
+                        pass = gen_code_c(
+                            indentation.nested(),
+                            intrinsic,
+                            preset_vals,
+                            format!("{context} {name}=pat{i}"),
+                            mode,
+                            is_aarch32
+                        )
                     )
+                })
+                .join("\n");
+            format!("{indentation}svbool_t {name};\n{passes}")
+        } else if current.ty.kind() == TypeKind::Bool {
+            // Some bool intrinsics nest quite deeply, so prefer looping
+            format!(
+                r"{indentation}bool {name}_vals[] = {{true, false}};
+{indentation}for(bool {name}: {name}_vals) {{
+{pass}
+{indentation}}}",
+                pass = gen_code_c(
+                    indentation.nested(),
+                    intrinsic,
+                    preset_vals,
+                    format!("{context} {name}=\" << {name} << \""),
+                    mode,
+                    is_aarch32
                 )
-            })
-            .join("\n")
+            )
+        } else {
+            current
+                .constraints
+                .iter()
+                .flat_map(|c| c.iter())
+                .map(|i| {
+                    let ty = current.ty.c_type();
+                    let val = if current.ty.kind().is_enum() {
+                        format!("static_cast<{ty}>({i})")
+                    } else {
+                        i.to_string()
+                    };
+                    let indentation_1 = indentation.nested();
+                    format!(
+                        r#"{indentation}{{
+{indentation_1}{ty} {name} = {val};
+{pass}
+{indentation}}}"#,
+                        pass = gen_code_c(
+                            indentation_1,
+                            intrinsic,
+                            preset_vals,
+                            format!("{context} {name}={i}"),
+                            mode,
+                            is_aarch32
+                        )
+                    )
+                })
+                .join("\n")
+        }
     } else {
-        intrinsic.generate_loop_c(indentation, &name, PASSES, p64_armv7_workaround)
+        intrinsic.generate_loop_c(indentation, &context, PASSES, mode, is_aarch32)
     }
 }
 
@@ -76,30 +129,18 @@ fn generate_c_program(
     notices: &str,
     header_files: &[&str],
     intrinsic: &Intrinsic,
-    p64_armv7_workaround: bool,
+    mode: Extension,
+    is_aarch32: bool,
 ) -> String {
-    let constraints = intrinsic
+    let preset_vals = intrinsic
         .arguments
         .iter()
-        .filter(|i| i.has_constraint())
+        .filter(|i| i.uses_set_values())
         .collect_vec();
 
     let indentation = Indentation::default();
-    format!(
-        r#"{notices}{header_files}
-#include <iostream>
-#include <cstring>
-#include <iomanip>
-#include <sstream>
-
-template<typename T1, typename T2> T1 cast(T2 x) {{
-  static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same");
-  T1 ret{{}};
-  memcpy(&ret, &x, sizeof(T1));
-  return ret;
-}}
 
-#ifdef __aarch64__
+    let neon_poly128_override = r#"#ifdef __aarch64__
 std::ostream& operator<<(std::ostream& os, poly128_t value) {{
   std::stringstream temp;
   do {{
@@ -113,78 +154,161 @@ std::ostream& operator<<(std::ostream& os, poly128_t value) {{
   return os;
 }}
 #endif
+"#;
+    let indentation_1 = indentation.nested();
+    let main_body = format!(
+        r#"{results_array}
+{element_count}
+
+{passes}
+{indentation_1}return 0;
+}}"#,
+        results_array = intrinsic.gen_results_array_c(indentation_1),
+        element_count = intrinsic.gen_element_count_c(indentation_1, Language::C),
+        passes = gen_code_c(
+            indentation_1,
+            intrinsic,
+            preset_vals.as_slice(),
+            Default::default(),
+            mode,
+            is_aarch32
+        )
+    );
+
+    format!(
+        r#"{notices}{header_files}
+#include <iostream>
+#include <cstring>
+#include <iomanip>
+#include <sstream>
+
+template<typename T1, typename T2> T1 cast(T2 x) {{
+  static_assert(sizeof(T1) == sizeof(T2), "sizeof T1 and T2 must be the same");
+  T1 ret{{}};
+  memcpy(&ret, &x, sizeof(T1));
+  return ret;
+}}
+
+{neon_poly128_override}
 
 {arglists}
 
 int main(int argc, char **argv) {{
-{passes}
-    return 0;
-}}"#,
+{body}"#,
         header_files = header_files
             .iter()
             .map(|header| format!("#include <{header}>"))
-            .collect::<Vec<_>>()
             .join("\n"),
         arglists = intrinsic.arguments.gen_arglists_c(indentation, PASSES),
-        passes = gen_code_c(
-            indentation.nested(),
-            intrinsic,
-            constraints.as_slice(),
-            Default::default(),
-            p64_armv7_workaround
-        ),
+        body = main_body,
+        neon_poly128_override = if let Extension::NEON = mode {
+            neon_poly128_override
+        } else {
+            ""
+        }
     )
 }
 
 fn gen_code_rust(
     indentation: Indentation,
     intrinsic: &Intrinsic,
-    constraints: &[&Argument],
-    name: String,
+    preset_vals: &[&Argument],
+    context: String,
+    mode: Extension,
+    is_aarch32: bool,
 ) -> String {
-    if let Some((current, constraints)) = constraints.split_last() {
-        let range = current
-            .constraints
-            .iter()
-            .map(|c| c.to_range())
-            .flat_map(|r| r.into_iter());
-
-        let body_indentation = indentation.nested();
-        range
-            .map(|i| {
-                format!(
-                    "{indentation}{{\n\
-                        {body_indentation}const {name}: {ty} = {val};\n\
-                        {pass}\n\
-                    {indentation}}}",
-                    name = current.name,
-                    ty = current.ty.rust_type(),
-                    val = i,
-                    pass = gen_code_rust(
-                        body_indentation,
-                        intrinsic,
-                        constraints,
-                        format!("{name}-{i}")
+    if let Some((current, preset_vals)) = preset_vals.split_last() {
+        let name = &current.name;
+        if current.is_predicate() {
+            current
+                .get_predicate_decls(indentation, Language::Rust)
+                .into_iter()
+                .enumerate()
+                .map(|(i, p)| {
+                    format!(
+                        r#"{p}
+{pass}"#,
+                        pass = gen_code_rust(
+                            indentation,
+                            intrinsic,
+                            preset_vals,
+                            format!("{context} {name}=pat{i}"),
+                            mode,
+                            is_aarch32
+                        )
                     )
+                })
+                .join("\n")
+        } else if current.ty.kind() == TypeKind::Bool {
+            // Some bool intrinsics nest quite deeply, so prefer looping
+            format!(
+                r"{indentation}for {name} in [true, false] {{
+{pass}
+{indentation}}}",
+                pass = gen_code_rust(
+                    indentation.nested(),
+                    intrinsic,
+                    preset_vals,
+                    format!("{context} {name}={{{name}}}"),
+                    mode,
+                    is_aarch32
                 )
-            })
-            .join("\n")
+            )
+        } else {
+            current
+                .constraints
+                .iter()
+                .flat_map(|c| c.iter())
+                .map(|i| {
+                    let ty = current.ty.rust_type();
+                    let val = if current.ty.kind().is_enum() {
+                        // This is defined behaviour, as the enums in types.rs are `#[repr(i32)]`
+                        // in order to facilitate passing them as const generics
+                        format!("unsafe {{ core::mem::transmute::<i32, _>({i}) }}")
+                    } else {
+                        i.to_string()
+                    };
+                    let indentation_1 = indentation.nested();
+                    format!(
+                        r#"{indentation}{{
+{indentation_1}const {name}: {ty} = {val};
+{pass}
+{indentation}}}"#,
+                        pass = gen_code_rust(
+                            indentation_1,
+                            intrinsic,
+                            preset_vals,
+                            format!("{context} {name}={i}"),
+                            mode,
+                            is_aarch32
+                        )
+                    )
+                })
+                .join("\n")
+        }
     } else {
-        intrinsic.generate_loop_rust(indentation, &name, PASSES)
+        intrinsic.generate_loop_rust(indentation, &context, PASSES, mode, is_aarch32)
     }
 }
 
-fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, a32: bool) -> String {
-    let constraints = intrinsic
+fn generate_rust_program(
+    notices: &str,
+    intrinsic: &Intrinsic,
+    mode: Extension,
+    is_aarch32: bool,
+) -> String {
+    let preset_vals = intrinsic
         .arguments
         .iter()
-        .filter(|i| i.has_constraint())
+        .filter(|i| i.uses_set_values())
         .collect_vec();
 
     let indentation = Indentation::default();
     format!(
         r#"{notices}#![feature(simd_ffi)]
 #![feature(link_llvm_intrinsics)]
+#![feature(unsized_fn_params)]
+#![feature(unsized_locals)]
 #![cfg_attr(target_arch = "arm", feature(stdarch_arm_neon_intrinsics))]
 #![feature(stdarch_arm_crc32)]
 #![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_fcma))]
@@ -193,43 +317,67 @@ fn generate_rust_program(notices: &str, intrinsic: &Intrinsic, a32: bool) -> Str
 #![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_sha3))]
 #![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_sm4))]
 #![cfg_attr(target_arch = "aarch64", feature(stdarch_neon_ftts))]
+#![cfg_attr(target_arch = "aarch64", feature(stdarch_aarch64_sve))]
 #![allow(non_upper_case_globals)]
-use core_arch::arch::{target_arch}::*;
+#![allow(internal_features)]
+#![allow(incomplete_features)]
+use core_arch::arch::{target_arch}::{extension};
 
 fn main() {{
+{results_array}
+{element_count}
+
 {arglists}
 {passes}
 }}
 "#,
-        target_arch = if a32 { "arm" } else { "aarch64" },
+        target_arch = if is_aarch32 { "arm" } else { "aarch64" },
+        extension = if let Extension::SVE = mode {
+            "sve::*"
+        } else {
+            "*"
+        },
         arglists = intrinsic
             .arguments
             .gen_arglists_rust(indentation.nested(), PASSES),
         passes = gen_code_rust(
             indentation.nested(),
             intrinsic,
-            &constraints,
-            Default::default()
-        )
+            &preset_vals,
+            Default::default(),
+            mode,
+            is_aarch32
+        ),
+        results_array = intrinsic.gen_results_array_rust(indentation.nested()),
+        element_count = intrinsic.gen_element_count_c(indentation.nested(), Language::Rust),
     )
 }
 
-fn compile_c(c_filename: &str, intrinsic: &Intrinsic, compiler: &str, a32: bool) -> bool {
-    let flags = std::env::var("CPPFLAGS").unwrap_or("".into());
+fn compile_c(
+    c_filename: &str,
+    intrinsic: &Intrinsic,
+    compiler: &str,
+    mode: Extension,
+    is_aarch32: bool,
+) -> bool {
+    let flags = std::env::var("CPPFLAGS").unwrap_or_default();
+    let mut a64_archflags = String::from("-march=armv8.6-a+crypto+sha3+sm4+crc+dotprod");
+    if let Extension::SVE = mode {
+        a64_archflags.push_str("+sve2-aes+sve2-sm4+sve2-sha3+sve2-bitperm+f32mm+f64mm");
+    }
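    // For reference, with Extension::SVE the full AArch64 arch flags are the concatenation
    // of the two string literals above:
    //   -march=armv8.6-a+crypto+sha3+sm4+crc+dotprod+sve2-aes+sve2-sm4+sve2-sha3+sve2-bitperm+f32mm+f64mm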
 
     let output = Command::new("sh")
         .arg("-c")
         .arg(format!(
             // -ffp-contract=off emulates Rust's approach of not fusing separate mul-add operations
             "{cpp} {cppflags} {arch_flags} -ffp-contract=off -Wno-narrowing -O2 -target {target} -o c_programs/{intrinsic} {filename}",
-            target = if a32 { "armv7-unknown-linux-gnueabihf" } else { "aarch64-unknown-linux-gnu" },
-            arch_flags = if a32 { "-march=armv8.6-a+crypto+crc+dotprod" } else { "-march=armv8.6-a+crypto+sha3+crc+dotprod" },
+            target = if is_aarch32 { "armv7-unknown-linux-gnueabihf" } else { "aarch64-unknown-linux-gnu" },
+            arch_flags = if is_aarch32 { "-march=armv8.6-a+crypto+crc+dotprod" } else { a64_archflags.as_str() },
             filename = c_filename,
             intrinsic = intrinsic.name,
             cpp = compiler,
             cppflags = flags,
-        ))
-        .output();
+        )).output();
     if let Ok(output) = output {
         if output.status.success() {
             true
@@ -258,7 +406,13 @@ fn build_notices(line_prefix: &str) -> String {
     )
 }
 
-fn build_c(notices: &str, intrinsics: &Vec<Intrinsic>, compiler: Option<&str>, a32: bool) -> bool {
+fn build_c(
+    notices: &str,
+    intrinsics: &Vec<Intrinsic>,
+    compiler: Option<&str>,
+    mode: Extension,
+    is_aarch32: bool,
+) -> bool {
     let _ = std::fs::create_dir("c_programs");
     intrinsics
         .par_iter()
@@ -266,25 +420,36 @@ fn build_c(notices: &str, intrinsics: &Vec<Intrinsic>, compiler: Option<&str>, a
             let c_filename = format!(r#"c_programs/{}.cpp"#, i.name);
             let mut file = File::create(&c_filename).unwrap();
 
-            let c_code = generate_c_program(notices, &["arm_neon.h", "arm_acle.h"], i, a32);
+            let header = if let Extension::SVE = mode {
+                "arm_sve.h"
+            } else {
+                "arm_neon.h"
+            };
+            let c_code = generate_c_program(notices, &[header, "arm_acle.h"], i, mode, is_aarch32);
             file.write_all(c_code.into_bytes().as_slice()).unwrap();
             match compiler {
                 None => true,
-                Some(compiler) => compile_c(&c_filename, i, compiler, a32),
+                Some(compiler) => compile_c(&c_filename, i, compiler, mode, is_aarch32),
             }
         })
         .find_any(|x| !x)
         .is_none()
 }
 
-fn build_rust(notices: &str, intrinsics: &[Intrinsic], toolchain: Option<&str>, a32: bool) -> bool {
+fn build_rust(
+    notices: &str,
+    intrinsics: &[Intrinsic],
+    toolchain: Option<&str>,
+    mode: Extension,
+    is_aarch32: bool,
+) -> bool {
     intrinsics.iter().for_each(|i| {
         let rust_dir = format!(r#"rust_programs/{}"#, i.name);
         let _ = std::fs::create_dir_all(&rust_dir);
         let rust_filename = format!(r#"{rust_dir}/main.rs"#);
-        let mut file = File::create(&rust_filename).unwrap();
+        let mut file = File::create(rust_filename).unwrap();
 
-        let c_code = generate_rust_program(notices, i, a32);
+        let c_code = generate_rust_program(notices, i, mode, is_aarch32);
         file.write_all(c_code.into_bytes().as_slice()).unwrap();
     });
 
@@ -317,7 +482,6 @@ path = "{intrinsic}/main.rs""#,
                             intrinsic = i.name
                         )
                     })
-                    .collect::<Vec<_>>()
                     .join("\n")
             )
             .into_bytes()
@@ -330,19 +494,25 @@ path = "{intrinsic}/main.rs""#,
         Some(t) => t,
     };
 
+    let features = if mode == Extension::SVE {
+        "-Ctarget-feature=+sve,+sve2,+sve2-aes,+sve2-sm4,+sve2-sha3,+sve2-bitperm,+f32mm,+f64mm"
+    } else {
+        ""
+    };
+
     let output = Command::new("sh")
         .current_dir("rust_programs")
         .arg("-c")
         .arg(format!(
             "cargo {toolchain} build --target {target} --release",
             toolchain = toolchain,
-            target = if a32 {
+            target = if is_aarch32 {
                 "armv7-unknown-linux-gnueabihf"
             } else {
                 "aarch64-unknown-linux-gnu"
             },
         ))
-        .env("RUSTFLAGS", "-Cdebuginfo=0")
+        .env("RUSTFLAGS", format!("-Cdebuginfo=0 {features}"))
         .output();
     if let Ok(output) = output {
         if output.status.success() {
@@ -394,6 +564,10 @@ struct Cli {
     /// Regenerate test programs, but don't build or run them
     #[arg(long)]
     generate_only: bool,
+
+    /// Run tests for SVE instead of Neon
+    #[arg(long)]
+    sve: bool,
 }
 
 fn main() {
@@ -413,31 +587,47 @@ fn main() {
     } else {
         Default::default()
     };
+
     let a32 = args.a32;
-    let mut intrinsics = get_neon_intrinsics(&filename).expect("Error parsing input file");
+
+    let (mode, mut intrinsics) = if args.sve {
+        (
+            Extension::SVE,
+            get_sve_intrinsics(&filename).expect("Error parsing input file"),
+        )
+    } else {
+        (
+            Extension::NEON,
+            get_neon_intrinsics(&filename).expect("Error parsing input file"),
+        )
+    };
 
     intrinsics.sort_by(|a, b| a.name.cmp(&b.name));
 
     let mut intrinsics = intrinsics
         .into_iter()
-        // Not sure how we would compare intrinsic that returns void.
+        // Void intrinsics consist of stores, prefetches and svwrffr, none of which we can test here
         .filter(|i| i.results.kind() != TypeKind::Void)
-        .filter(|i| i.results.kind() != TypeKind::BFloat)
-        .filter(|i| !(i.results.kind() == TypeKind::Float && i.results.inner_size() == 16))
-        .filter(|i| !i.arguments.iter().any(|a| a.ty.kind() == TypeKind::BFloat))
+        // Most pointer intrinsics access memory, which we handle with separate tests
+        .filter(|i| {
+            !i.arguments.iter().any(|a| a.is_ptr())
+                || i.name.starts_with("svwhilewr")
+                || i.name.starts_with("svwhilerw")
+        })
+        // `bases` arguments are really pointers, but address calculation intrinsics (svadr*)
+        // never access the memory they point to, so those can still be tested
+        .filter(|i| !i.arguments.iter().any(|a| a.name == "bases") || i.name.starts_with("svadr"))
         .filter(|i| {
             !i.arguments
                 .iter()
-                .any(|a| a.ty.kind() == TypeKind::Float && a.ty.inner_size() == 16)
+                .any(|a| !a.is_predicate() && a.ty.inner_size() == 128)
         })
-        // Skip pointers for now, we would probably need to look at the return
-        // type to work out how many elements we need to point to.
-        .filter(|i| !i.arguments.iter().any(|a| a.is_ptr()))
-        .filter(|i| !i.arguments.iter().any(|a| a.ty.inner_size() == 128))
         .filter(|i| !skip.contains(&i.name))
         .filter(|i| !(a32 && i.a64_only))
         .collect::<Vec<_>>();
+
     intrinsics.dedup();
+    println!("Testing {} intrinsics", intrinsics.len());
 
     let (toolchain, cpp_compiler) = if args.generate_only {
         (None, None)
@@ -450,16 +640,16 @@ fn main() {
 
     let notices = build_notices("// ");
 
-    if !build_c(&notices, &intrinsics, cpp_compiler.as_deref(), a32) {
+    if !build_c(&notices, &intrinsics, cpp_compiler.as_deref(), mode, a32) {
         std::process::exit(2);
     }
 
-    if !build_rust(&notices, &intrinsics, toolchain.as_deref(), a32) {
+    if !build_rust(&notices, &intrinsics, toolchain.as_deref(), mode, a32) {
         std::process::exit(3);
     }
 
     if let Some(ref toolchain) = toolchain {
-        if !compare_outputs(&intrinsics, toolchain, &c_runner, a32) {
+        if !compare_outputs(&intrinsics, toolchain, &c_runner, mode, a32) {
             std::process::exit(1)
         }
     }
@@ -471,7 +661,18 @@ enum FailureReason {
     Difference(String, String, String),
 }
 
-fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a32: bool) -> bool {
+fn compare_outputs(
+    intrinsics: &Vec<Intrinsic>,
+    toolchain: &str,
+    runner: &str,
+    mode: Extension,
+    is_aarch32: bool,
+) -> bool {
+    let features = if mode == Extension::SVE {
+        "-Ctarget-feature=+sve,+sve2,+sve2-aes,+sve2-sm4,+sve2-sha3,+sve2-bitperm,+f32mm,+f64mm"
+    } else {
+        ""
+    };
     let intrinsics = intrinsics
         .par_iter()
         .filter_map(|intrinsic| {
@@ -490,13 +691,13 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a
                     "cargo {toolchain} run --target {target} --bin {intrinsic} --release",
                     intrinsic = intrinsic.name,
                     toolchain = toolchain,
-                    target = if a32 {
+                    target = if is_aarch32 {
                         "armv7-unknown-linux-gnueabihf"
                     } else {
                         "aarch64-unknown-linux-gnu"
                     },
                 ))
-                .env("RUSTFLAGS", "-Cdebuginfo=0")
+                .env("RUSTFLAGS", format!("-Cdebuginfo=0 {features}"))
                 .output();
 
             let (c, rust) = match (c, rust) {
@@ -506,6 +707,8 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a
 
             if !c.status.success() {
                 error!("Failed to run C program for intrinsic {}", intrinsic.name);
+                error!("stdout: {}", std::str::from_utf8(&c.stdout).unwrap());
+                error!("stderr: {}", std::str::from_utf8(&c.stderr).unwrap());
                 return Some(FailureReason::RunC(intrinsic.name.clone()));
             }
 
@@ -514,6 +717,8 @@ fn compare_outputs(intrinsics: &Vec<Intrinsic>, toolchain: &str, runner: &str, a
                     "Failed to run rust program for intrinsic {}",
                     intrinsic.name
                 );
+                error!("stdout: {}", std::str::from_utf8(&rust.stdout).unwrap());
+                error!("stderr: {}", std::str::from_utf8(&rust.stderr).unwrap());
                 return Some(FailureReason::RunRust(intrinsic.name.clone()));
             }
 
diff --git a/crates/intrinsic-test/src/types.rs b/crates/intrinsic-test/src/types.rs
index a3db342ae2..4c4cf85841 100644
--- a/crates/intrinsic-test/src/types.rs
+++ b/crates/intrinsic-test/src/types.rs
@@ -1,20 +1,30 @@
 use std::fmt;
 use std::str::FromStr;
 
-use itertools::Itertools as _;
-
 use crate::format::Indentation;
-use crate::values::value_for_array;
+use crate::values::{value_for_array, MAX_SVE_BITS};
 use crate::Language;
 
+use itertools::Itertools;
+
+#[allow(dead_code)]
+#[derive(Debug, PartialEq, Copy, Clone)]
+pub enum VecLen {
+    Scalable,
+    Fixed(u32),
+}
+
 #[derive(Debug, PartialEq, Copy, Clone)]
 pub enum TypeKind {
     BFloat,
+    Bool,
     Float,
     Int,
     UInt,
     Poly,
     Void,
+    SvPattern,
+    SvPrefetchOp,
 }
 
 impl FromStr for TypeKind {
@@ -22,12 +32,15 @@ impl FromStr for TypeKind {
 
     fn from_str(s: &str) -> Result<Self, Self::Err> {
         match s {
-            "bfloat" => Ok(Self::BFloat),
-            "float" => Ok(Self::Float),
-            "int" => Ok(Self::Int),
+            "svbool" | "bool" => Ok(Self::Bool),
+            "svbfloat" | "bfloat" => Ok(Self::BFloat),
+            "svfloat" | "float" => Ok(Self::Float),
+            "svint" | "int" => Ok(Self::Int),
+            "svuint" | "uint" | "unsigned" => Ok(Self::UInt),
             "poly" => Ok(Self::Poly),
-            "uint" | "unsigned" => Ok(Self::UInt),
             "void" => Ok(Self::Void),
+            "svpattern" => Ok(Self::SvPattern),
+            "svprfop" => Ok(Self::SvPrefetchOp),
             _ => Err(format!("Impossible to parse argument kind {s}")),
         }
     }
@@ -39,12 +52,15 @@ impl fmt::Display for TypeKind {
             f,
             "{}",
             match self {
+                Self::Bool => "bool",
                 Self::BFloat => "bfloat",
                 Self::Float => "float",
                 Self::Int => "int",
                 Self::UInt => "uint",
                 Self::Poly => "poly",
                 Self::Void => "void",
+                Self::SvPattern => "svpattern",
+                Self::SvPrefetchOp => "svprfop",
             }
         )
     }
@@ -54,6 +70,7 @@ impl TypeKind {
     /// Gets the type part of a c typedef for a type that's in the form of {type}{size}_t.
     pub fn c_prefix(&self) -> &str {
         match self {
+            Self::Bool => "bool",
             Self::Float => "float",
             Self::Int => "int",
             Self::UInt => "uint",
@@ -72,6 +89,10 @@ impl TypeKind {
             _ => unreachable!("Unused type kind: {:#?}", self),
         }
     }
+
+    pub fn is_enum(&self) -> bool {
+        self == &TypeKind::SvPattern || self == &TypeKind::SvPrefetchOp
+    }
 }
 
 #[derive(Debug, PartialEq, Clone)]
@@ -84,13 +105,14 @@ pub enum IntrinsicType {
         constant: bool,
         kind: TypeKind,
         /// The bit length of this type (e.g. 32 for u32).
+        /// For predicates, this is the bit length of the element that each predicate bit represents
         bit_len: Option<u32>,
 
-        /// Length of the SIMD vector (i.e. 4 for uint32x4_t), A value of `None`
-        /// means this is not a simd type. A `None` can be assumed to be 1,
-        /// although in some places a distinction is needed between `u64` and
+        /// Length of the vector (e.g. `Fixed(4)` for uint32x4_t). A value of `None`
+        /// means this is not a SIMD type; it can be assumed to mean `Fixed(1)`,
+        /// although in some places a distinction is needed between `u64` and
         /// `uint64x1_t`, which this signals.
-        simd_len: Option<u32>,
+        simd_len: Option<VecLen>,
 
         /// The number of rows for SIMD matrices (i.e. 2 for uint8x8x2_t).
         /// A value of `None` represents a type that does not contain any
@@ -118,17 +140,21 @@ impl IntrinsicType {
             IntrinsicType::Type {
                 bit_len: Some(bl), ..
             } => *bl,
-            _ => unreachable!(""),
+            _ => unreachable!("{self:?}"),
+        }
+    }
+
+    pub fn set_inner_size(&mut self, size: u32) {
+        match self {
+            IntrinsicType::Ptr { child, .. } => child.set_inner_size(size),
+            IntrinsicType::Type { bit_len, .. } => *bit_len = Some(size),
         }
     }
 
-    pub fn num_lanes(&self) -> u32 {
+    pub fn num_lanes(&self) -> Option<VecLen> {
         match *self {
             IntrinsicType::Ptr { ref child, .. } => child.num_lanes(),
-            IntrinsicType::Type {
-                simd_len: Some(sl), ..
-            } => sl,
-            _ => 1,
+            IntrinsicType::Type { simd_len, .. } => simd_len,
         }
     }
 
@@ -156,6 +182,17 @@ impl IntrinsicType {
         }
     }
 
+    pub fn is_scalable(&self) -> bool {
+        match *self {
+            IntrinsicType::Ptr { ref child, .. } => child.is_scalable(),
+            IntrinsicType::Type {
+                simd_len: Some(VecLen::Scalable),
+                ..
+            } => true,
+            _ => false,
+        }
+    }
+
     pub fn is_ptr(&self) -> bool {
         match *self {
             IntrinsicType::Ptr { .. } => true,
@@ -163,26 +200,57 @@ impl IntrinsicType {
         }
     }
 
-    pub fn c_scalar_type(&self) -> String {
-        format!(
-            "{prefix}{bits}_t",
-            prefix = self.kind().c_prefix(),
-            bits = self.inner_size()
+    pub fn is_predicate(&self) -> bool {
+        matches!(
+            *self,
+            IntrinsicType::Type {
+                kind: TypeKind::Bool,
+                simd_len: Some(_),
+                ..
+            }
         )
     }
 
+    pub fn is_p64(&self) -> bool {
+        match *self {
+            IntrinsicType::Ptr { ref child, .. } => child.is_p64(),
+            IntrinsicType::Type {
+                kind: TypeKind::Poly,
+                bit_len: Some(64),
+                ..
+            } => true,
+            _ => false,
+        }
+    }
+
+    pub fn c_scalar_type(&self) -> String {
+        if self.kind() == TypeKind::Bool {
+            "bool".to_string()
+        } else {
+            format!(
+                "{prefix}{bits}_t",
+                prefix = self.kind().c_prefix(),
+                bits = self.inner_size()
+            )
+        }
+    }
+
     pub fn rust_scalar_type(&self) -> String {
-        format!(
-            "{prefix}{bits}",
-            prefix = self.kind().rust_prefix(),
-            bits = self.inner_size()
-        )
+        if self.kind() == TypeKind::Bool {
+            "bool".to_string()
+        } else {
+            format!(
+                "{prefix}{bits}",
+                prefix = self.kind().rust_prefix(),
+                bits = self.inner_size()
+            )
+        }
     }
 
     /// Gets a string containing the typename for this type in C format.
     pub fn c_type(&self) -> String {
         match self {
-            IntrinsicType::Ptr { child, .. } => child.c_type(),
+            IntrinsicType::Ptr { child, .. } => format!("{}*", child.c_type()),
             IntrinsicType::Type {
                 constant,
                 kind,
@@ -190,69 +258,105 @@ impl IntrinsicType {
                 simd_len: None,
                 vec_len: None,
                 ..
-            } => format!(
-                "{}{}{}_t",
-                if *constant { "const " } else { "" },
-                kind.c_prefix(),
-                bit_len
-            ),
+            } => {
+                if kind.is_enum() {
+                    format!("const {kind}")
+                } else if *kind == TypeKind::Bool {
+                    kind.c_prefix().to_string()
+                } else {
+                    format!(
+                        "{}{}{}_t",
+                        if *constant { "const " } else { "" },
+                        kind.c_prefix(),
+                        bit_len
+                    )
+                }
+            }
             IntrinsicType::Type {
                 kind,
                 bit_len: Some(bit_len),
-                simd_len: Some(simd_len),
-                vec_len: None,
+                simd_len: Some(VecLen::Fixed(simd_len)),
+                vec_len: Some(1) | None,
                 ..
             } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()),
             IntrinsicType::Type {
                 kind,
                 bit_len: Some(bit_len),
-                simd_len: Some(simd_len),
+                simd_len: Some(VecLen::Fixed(simd_len)),
                 vec_len: Some(vec_len),
                 ..
             } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()),
-            _ => todo!("{:#?}", self),
-        }
-    }
-
-    pub fn c_single_vector_type(&self) -> String {
-        match self {
-            IntrinsicType::Ptr { child, .. } => child.c_single_vector_type(),
             IntrinsicType::Type {
                 kind,
                 bit_len: Some(bit_len),
-                simd_len: Some(simd_len),
-                vec_len: Some(_),
+                simd_len: Some(VecLen::Scalable),
+                vec_len,
                 ..
-            } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()),
-            _ => unreachable!("Shouldn't be called on this type"),
+            } => format!(
+                "sv{}{bit_len}{}_t",
+                kind.c_prefix(),
+                match vec_len {
+                    Some(len) if *len > 1 => format!("x{len}"),
+                    _ => "".to_string(),
+                }
+            ),
+            _ => unreachable!("{self:#?}"),
         }
     }
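    // A few illustrative c_type() outputs, derived from the match arms above:
    //   scalar u32                -> "uint32_t"
    //   NEON uint8x16_t           -> "uint8x16_t"
    //   NEON tuple uint8x16x2_t   -> "uint8x16x2_t"
    //   SVE svuint32_t            -> "svuint32_t"
    //   SVE tuple svfloat64x2_t   -> "svfloat64x2_t"
    //   enum kinds                -> "const svpattern" / "const svprfop"
    //   pointers append "*" to the child's type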
 
     pub fn rust_type(&self) -> String {
         match self {
-            IntrinsicType::Ptr { child, .. } => child.c_type(),
+            IntrinsicType::Ptr { child, .. } => format!("{}*", child.rust_type()),
             IntrinsicType::Type {
+                constant,
                 kind,
                 bit_len: Some(bit_len),
                 simd_len: None,
                 vec_len: None,
                 ..
-            } => format!("{}{bit_len}", kind.rust_prefix()),
+            } => {
+                if kind.is_enum() {
+                    kind.to_string()
+                } else if *constant {
+                    // We make all const generic parameters `i32`s - this would cause issues with
+                    // pointers to const data, but the tool doesn't test those intrinsics
+                    "i32".to_string()
+                } else if *kind == TypeKind::Bool {
+                    "bool".to_string()
+                } else {
+                    format!("{}{bit_len}", kind.rust_prefix())
+                }
+            }
             IntrinsicType::Type {
                 kind,
                 bit_len: Some(bit_len),
-                simd_len: Some(simd_len),
-                vec_len: None,
+                simd_len: Some(VecLen::Fixed(simd_len)),
+                vec_len: Some(1) | None,
                 ..
             } => format!("{}{bit_len}x{simd_len}_t", kind.c_prefix()),
             IntrinsicType::Type {
                 kind,
                 bit_len: Some(bit_len),
-                simd_len: Some(simd_len),
+                simd_len: Some(VecLen::Fixed(simd_len)),
                 vec_len: Some(vec_len),
                 ..
             } => format!("{}{bit_len}x{simd_len}x{vec_len}_t", kind.c_prefix()),
-            _ => todo!("{:#?}", self),
+            IntrinsicType::Type {
+                kind,
+                bit_len: Some(bit_len),
+                simd_len: Some(VecLen::Scalable),
+                vec_len,
+                ..
+            } => format!(
+                "sv{}{}{}_t",
+                kind.c_prefix(),
+                bit_len,
+                match vec_len {
+                    Some(len) if *len > 1 => format!("x{len}"),
+                    _ => "".to_string(),
+                }
+            ),
+            _ => unreachable!("{self:#?}"),
         }
     }
 
@@ -267,10 +371,10 @@ impl IntrinsicType {
         match *self {
             IntrinsicType::Type {
                 kind,
-                bit_len: Some(bit_len),
+                bit_len: Some(8),
                 ..
-            } if bit_len == 8 => match kind {
-                TypeKind::Int => "(int)",
+            } => match kind {
+                TypeKind::Int | TypeKind::Bool => "(int)",
                 TypeKind::UInt => "(unsigned int)",
                 TypeKind::Poly => "(unsigned int)(uint8_t)",
                 _ => "",
@@ -309,7 +413,27 @@ impl IntrinsicType {
         language: &Language,
     ) -> String {
         match self {
-            IntrinsicType::Ptr { child, .. } => child.populate_random(indentation, loads, language),
+            IntrinsicType::Ptr { .. } => {
+                let (prefix, suffix) = match language {
+                    Language::Rust => ("[", "]"),
+                    Language::C => ("{", "}"),
+                };
+                format!(
+                    "{indentation}{prefix}{body}{suffix}",
+                    body = (0..sliding_window_value_count(64, None, 1, loads))
+                        .map(|i| {
+                            format!(
+                                "{}{}",
+                                value_for_array(64, i),
+                                match *language {
+                                    Language::Rust => " as usize",
+                                    Language::C => "",
+                                }
+                            )
+                        })
+                        .join(",")
+                )
+            }
             IntrinsicType::Type {
                 bit_len: Some(bit_len @ (8 | 16 | 32 | 64)),
                 kind: kind @ (TypeKind::Int | TypeKind::UInt | TypeKind::Poly),
@@ -318,14 +442,19 @@ impl IntrinsicType {
                 ..
             } => {
                 let (prefix, suffix) = match language {
-                    &Language::Rust => ("[", "]"),
-                    &Language::C => ("{", "}"),
+                    Language::Rust => ("[", "]"),
+                    Language::C => ("{", "}"),
                 };
-                let body_indentation = indentation.nested();
+
                 format!(
-                    "{prefix}\n{body}\n{indentation}{suffix}",
-                    body = (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
-                        .format_with(",\n", |i, fmt| {
+                    "{prefix}{body}{indentation}{suffix}",
+                    body = (0..sliding_window_value_count(
+                        *bit_len,
+                        *simd_len,
+                        vec_len.unwrap_or(1),
+                        loads
+                    ))
+                        .format_with(", ", |i, fmt| {
                             let src = value_for_array(*bit_len, i);
                             assert!(src == 0 || src.ilog2() < *bit_len);
                             if *kind == TypeKind::Int && (src >> (*bit_len - 1)) != 0 {
@@ -336,12 +465,12 @@ impl IntrinsicType {
                                 if (twos_compl == src) && (language == &Language::C) {
                                     // `src` is INT*_MIN. C requires `-0x7fffffff - 1` to avoid
                                     // undefined literal overflow behaviour.
-                                    fmt(&format_args!("{body_indentation}-{ones_compl:#x} - 1"))
+                                    fmt(&format_args!("-{ones_compl:#x} - 1"))
                                 } else {
-                                    fmt(&format_args!("{body_indentation}-{twos_compl:#x}"))
+                                    fmt(&format_args!("-{twos_compl:#x}"))
                                 }
                             } else {
-                                fmt(&format_args!("{body_indentation}{src:#x}"))
+                                fmt(&format_args!("{src:#x}"))
                             }
                         })
                 )
@@ -361,11 +490,15 @@ impl IntrinsicType {
                     _ => unreachable!(),
                 };
                 format!(
-                    "{prefix}\n{body}\n{indentation}{suffix}",
-                    body = (0..(simd_len.unwrap_or(1) * vec_len.unwrap_or(1) + loads - 1))
-                        .format_with(",\n", |i, fmt| fmt(&format_args!(
+                    "{prefix}{body}{indentation}{suffix}",
+                    body = (0..sliding_window_value_count(
+                        *bit_len,
+                        *simd_len,
+                        vec_len.unwrap_or(1),
+                        loads
+                    ))
+                        .format_with(", ", |i, fmt| fmt(&format_args!(
                             "{indentation}{cast_prefix}{src:#x}{cast_suffix}",
-                            indentation = indentation.nested(),
                             src = value_for_array(*bit_len, i)
                         )))
                 )
@@ -375,21 +508,18 @@ impl IntrinsicType {
     }
 
     /// Determines the load function for this type.
-    pub fn get_load_function(&self, armv7_p64_workaround: bool) -> String {
+    pub fn get_load_function(&self, is_aarch32: bool) -> String {
         match self {
-            IntrinsicType::Ptr { child, .. } => child.get_load_function(armv7_p64_workaround),
+            IntrinsicType::Ptr { child, .. } => child.get_load_function(is_aarch32),
             IntrinsicType::Type {
                 kind: k,
                 bit_len: Some(bl),
-                simd_len,
+                simd_len: Some(VecLen::Fixed(sl)),
                 vec_len,
                 ..
             } => {
-                let quad = if simd_len.unwrap_or(1) * bl > 64 {
-                    "q"
-                } else {
-                    ""
-                };
+                let quad = if (sl * bl) > 64 { "q" } else { "" };
+
                 format!(
                     "vld{len}{quad}_{type}{size}",
                     type = match k {
@@ -397,110 +527,192 @@ impl IntrinsicType {
                         TypeKind::Int => "s",
                         TypeKind::Float => "f",
                         // The ACLE doesn't support 64-bit polynomial loads on Armv7
-                        TypeKind::Poly => if armv7_p64_workaround && *bl == 64 {"s"} else {"p"},
-                        x => todo!("get_load_function TypeKind: {:#?}", x),
+                        TypeKind::Poly => if is_aarch32 && *bl == 64 {"s"} else {"p"},
+                        x => unreachable!("get_load_function: {x:#?}"),
                     },
                     size = bl,
                     quad = quad,
                     len = vec_len.unwrap_or(1),
                 )
             }
-            _ => todo!("get_load_function IntrinsicType: {:#?}", self),
+            _ => unreachable!("get_load_function {self:#?}"),
         }
     }
 
-    /// Determines the get lane function for this type.
-    pub fn get_lane_function(&self) -> String {
+    pub fn get_load_function_sve(&self) -> String {
         match self {
-            IntrinsicType::Ptr { child, .. } => child.get_lane_function(),
+            IntrinsicType::Ptr { child, .. } => child.get_load_function_sve(),
             IntrinsicType::Type {
                 kind: k,
                 bit_len: Some(bl),
-                simd_len,
+                simd_len: Some(VecLen::Scalable),
+                vec_len,
                 ..
             } => {
-                let quad = if (simd_len.unwrap_or(1) * bl) > 64 {
-                    "q"
-                } else {
-                    ""
-                };
                 format!(
-                    "vget{quad}_lane_{type}{size}",
+                    "svld{len}_{type}{size}",
                     type = match k {
                         TypeKind::UInt => "u",
                         TypeKind::Int => "s",
                         TypeKind::Float => "f",
-                        TypeKind::Poly => "p",
-                        x => todo!("get_load_function TypeKind: {:#?}", x),
+                        x => unreachable!("get_load_function {x:#?}"),
                     },
                     size = bl,
+                    len = vec_len.unwrap_or(1),
+                )
+            }
+            _ => unreachable!("get_load_function_sve {self:#?}"),
+        }
+    }
+
+    /// Determines the store function for this type.
+    pub fn get_store_function(&self, is_aarch32: bool) -> String {
+        match self {
+            IntrinsicType::Ptr { child, .. } => child.get_store_function(is_aarch32),
+            IntrinsicType::Type {
+                kind: k,
+                bit_len: Some(bl),
+                simd_len: Some(sl),
+                vec_len,
+                ..
+            } => {
+                let quad = match sl {
+                    VecLen::Fixed(len) if len * bl > 64 => "q",
+                    _ => "",
+                };
+
+                format!(
+                    "{sve}vst{len}{quad}_{ty}{size}",
+                    ty = match k {
+                        TypeKind::UInt => "u",
+                        // Predicates are converted to ints
+                        TypeKind::Int | TypeKind::Bool => "s",
+                        TypeKind::Float => "f",
+                        TypeKind::Poly =>
+                            if is_aarch32 && *bl == 64 {
+                                "s"
+                            } else {
+                                "p"
+                            },
+                        x => unreachable!("get_store_function {x:#?}"),
+                    },
+                    sve = if self.is_scalable() { "s" } else { "" },
+                    size = bl,
                     quad = quad,
+                    len = vec_len.unwrap_or(1),
                 )
             }
-            _ => todo!("get_lane_function IntrinsicType: {:#?}", self),
+            _ => unreachable!("get_store_function IntrinsicType: {self:#?}"),
         }
     }
 
     pub fn from_c(s: &str) -> Result<IntrinsicType, String> {
-        const CONST_STR: &str = "const";
+        const CONST_STR: &str = "const ";
+        const ENUM_STR: &str = "enum ";
+
         if let Some(s) = s.strip_suffix('*') {
-            let (s, constant) = match s.trim().strip_suffix(CONST_STR) {
-                Some(stripped) => (stripped, true),
-                None => (s, false),
+            let (s, constant) = if s.ends_with(CONST_STR) || s.starts_with(CONST_STR) {
+                (
+                    s.trim_start_matches(CONST_STR).trim_end_matches(CONST_STR),
+                    true,
+                )
+            } else {
+                (s, false)
             };
+
             let s = s.trim_end();
+
             Ok(IntrinsicType::Ptr {
                 constant,
                 child: Box::new(IntrinsicType::from_c(s)?),
             })
         } else {
-            // [const ]TYPE[{bitlen}[x{simdlen}[x{vec_len}]]][_t]
-            let (mut s, constant) = match s.strip_prefix(CONST_STR) {
-                Some(stripped) => (stripped.trim(), true),
-                None => (s, false),
+            // [const ][sv]TYPE[{bitlen}[x{simdlen}[x{vec_len}]]]_t
+            //   | [enum ]TYPE
+            let (mut s, constant) = match (s.strip_prefix(CONST_STR), s.strip_prefix(ENUM_STR)) {
+                (Some(const_strip), _) => (const_strip, true),
+                (_, Some(enum_strip)) => (enum_strip, true),
+                (None, None) => (s, false),
             };
             s = s.strip_suffix("_t").unwrap_or(s);
-            let mut parts = s.split('x'); // [[{bitlen}], [{simdlen}], [{vec_len}] ]
+            let sve = s.starts_with("sv");
+
+            let mut parts = s.split('x'); // [{type}{bitlen} [, {simdlen} [, {vec_len}]]]
+
             let start = parts.next().ok_or("Impossible to parse type")?;
+
             if let Some(digit_start) = start.find(|c: char| c.is_ascii_digit()) {
                 let (arg_kind, bit_len) = start.split_at(digit_start);
+
                 let arg_kind = arg_kind.parse::<TypeKind>()?;
                 let bit_len = bit_len.parse::<u32>().map_err(|err| err.to_string())?;
-                let simd_len = match parts.next() {
+                let n1 = match parts.next() {
                     Some(part) => Some(
                         part.parse::<u32>()
                             .map_err(|_| "Couldn't parse simd_len: {part}")?,
                     ),
                     None => None,
                 };
-                let vec_len = match parts.next() {
+                let n2 = match parts.next() {
                     Some(part) => Some(
                         part.parse::<u32>()
                             .map_err(|_| "Couldn't parse vec_len: {part}")?,
                     ),
                     None => None,
                 };
+
                 Ok(IntrinsicType::Type {
                     constant,
                     kind: arg_kind,
                     bit_len: Some(bit_len),
-                    simd_len,
-                    vec_len,
+                    simd_len: if sve {
+                        Some(VecLen::Scalable)
+                    } else {
+                        n1.map(VecLen::Fixed)
+                    },
+                    vec_len: if sve { n1 } else { n2 },
                 })
             } else {
                 let kind = start.parse::<TypeKind>()?;
                 let bit_len = match kind {
-                    TypeKind::Int => Some(32),
+                    TypeKind::SvPattern | TypeKind::SvPrefetchOp | TypeKind::Int => {
+                        // All these are represented as i32
+                        Some(32)
+                    }
+                    TypeKind::Bool => Some(8),
                     _ => None,
                 };
                 Ok(IntrinsicType::Type {
                     constant,
-                    kind: start.parse::<TypeKind>()?,
+                    kind,
                     bit_len,
-                    simd_len: None,
+                    simd_len: if sve && !kind.is_enum() {
+                        Some(VecLen::Scalable)
+                    } else {
+                        None
+                    },
                     vec_len: None,
                 })
             }
         }
     }
 }
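// A few illustrative from_c() parses, derived from the logic above:
//   "uint32x4_t"     -> Type { kind: UInt, bit_len: Some(32), simd_len: Some(Fixed(4)), vec_len: None, .. }
//   "svuint32x2_t"   -> Type { kind: UInt, bit_len: Some(32), simd_len: Some(Scalable), vec_len: Some(2), .. }
//   "svbool_t"       -> Type { kind: Bool, bit_len: Some(8), simd_len: Some(Scalable), vec_len: None, .. }
//   "enum svpattern" -> Type { kind: SvPattern, bit_len: Some(32), simd_len: None, vec_len: None, constant: true, .. }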
+
+// Returns the number of values needed to fill `num_vectors` vectors of length `simd_len`,
+// `loads` times, with each load's window shifted along by one value (hence the `+ loads - 1`).
+fn sliding_window_value_count(
+    bit_len: u32,
+    simd_len: Option<VecLen>,
+    num_vectors: u32,
+    loads: u32,
+) -> u32 {
+    // If it's SVE, assume the vector has the largest possible length given the data type.
+    let vector_length = simd_len.map_or(1, |v| {
+        if let VecLen::Fixed(n) = v {
+            n
+        } else {
+            MAX_SVE_BITS / bit_len
+        }
+    });
+    vector_length * num_vectors + loads - 1
+}
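// Worked examples of sliding_window_value_count (the load count of 5 is illustrative, not
// the tool's actual PASSES value):
//   NEON uint32x4_t, one vector, 5 loads:                 4 * 1 + 5 - 1 = 8 values
//   SVE svfloat64_t at the 2048-bit MAX_SVE_BITS maximum: (2048 / 64) * 1 + 5 - 1 = 36 values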
diff --git a/crates/intrinsic-test/src/values.rs b/crates/intrinsic-test/src/values.rs
index 1a69a034c8..3311db57d7 100644
--- a/crates/intrinsic-test/src/values.rs
+++ b/crates/intrinsic-test/src/values.rs
@@ -1,3 +1,8 @@
+pub const MAX_SVE_BITS: u32 = 2048;
+
+// All SVE vector lengths must be a multiple of this granule size
+pub const SVE_GRANULE_BITS: u32 = 128;
+
 /// Get a single value for an argument values array in a deterministic way.
 /// * `bits`: The number of bits for the type, only 8, 16, 32, 64 are valid values
 /// * `index`: The position in the array we are generating for
@@ -13,8 +18,22 @@ pub fn value_for_array(bits: u32, index: u32) -> u64 {
 }
 
 pub const VALUES_8: &[u8] = &[
-    0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
-    0xf0, 0x80, 0x3b, 0xff,
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf, 0x10, 0x11,
+    0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21,
+    0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31,
+    0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, 0x40, 0x41,
+    0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, 0x50, 0x51,
+    0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, 0x60, 0x61,
+    0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, 0x70, 0x71,
+    0x72, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, 0x80, 0x81,
+    0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, 0x90, 0x91,
+    0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, 0xa0, 0xa1,
+    0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf, 0xb0, 0xb1,
+    0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf, 0xc0, 0xc1,
+    0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf, 0xd0, 0xd1,
+    0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf, 0xe0, 0xe1,
+    0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, 0xf0, 0xf1,
+    0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff,
 ];
 
 pub const VALUES_16: &[u16] = &[
@@ -40,6 +59,17 @@ pub const VALUES_16: &[u16] = &[
     0x0001, // The same values again, but negated.
     0x8000, 0x8400, 0xb7ff, 0xb800, 0xb801, 0xbbff, 0xbc00, 0xbc01, 0xbe00, 0xc900, 0xfbff, 0xfc00,
     0xff23, 0xfe00, 0xfd23, 0xfc01, 0x8012, 0x83ff, 0x8001,
+    0x3555, // As close to 1/3 as possible.
+    // Random values.
+    0xfc00, 0xc000, 0x5140, 0x5800, 0x63d2, 0x5630, 0x3560, 0x9191, 0x4178, 0x6212, 0x67d0, 0x3312,
+    0x4cef, 0x4973, 0x3ecc, 0x5166, 0x4d80, 0x6248, 0x46fd, 0x39c4, 0x39c5, 0x4866, 0x6050, 0x498e,
+    0x4a0f, // The previous values in a different order.
+    0x3555, 0xfc00, 0xc000, 0x9191, 0x5140, 0x5800, 0x8001, 0x83ff, 0x63d2, 0x5630, 0x3560, 0x4178,
+    0x7d23, 0x7c01, 0x0012, 0xb800, 0x03ff, 0x0001, 0x7e00, 0x7f23, 0x8000, 0x8400, 0xb7ff, 0xb801,
+    0x3312, 0x4cef, 0x4973, 0x39c4, 0x3ecc, 0x5166, 0x67d0, 0x6212, 0x4d80, 0x6248, 0x46fd, 0x39c5,
+    0xbc01, 0xbe00, 0xc900, 0xfc01, 0xfbff, 0xfc00, 0xbc00, 0xbbff, 0xff23, 0xfe00, 0xfd23, 0x8012,
+    0x37ff, 0x3800, 0x3801, 0x7bff, 0x3bff, 0x3c00, 0x0400, 0x0000, 0x3c01, 0x3e00, 0x4900, 0x7c00,
+    0x498e, 0x4a0f, 0x6050, 0x4866,
 ];
 
 pub const VALUES_32: &[u32] = &[
@@ -66,7 +96,12 @@ pub const VALUES_32: &[u32] = &[
     0x00000001, // The same values again, but negated.
     0x80000000, 0x80800000, 0xbeffffff, 0xbf000000, 0xbf000001, 0xbf7fffff, 0xbf800000, 0xbf800001,
     0xbfc00000, 0xc1200000, 0xff8fffff, 0xff800000, 0xffd23456, 0xffc00000, 0xff923456, 0xff800001,
-    0x80123456, 0x807fffff, 0x80000001,
+    0x80123456, 0x807fffff, 0x80000001, 0x40490fdb, // Approximately pi.
+    0x3eaaaaab, // Approximately 1/3.
+    // Random values.
+    0x4205cccd, 0x4229178d, 0x42c6a0c5, 0x3b3302f7, 0x3f9df45e, 0x41daa3d7, 0x47c3501d, 0xc3889333,
+    0xc2c675c3, 0xc69c449a, 0xc341fd71, 0xc502dfd7, 0xbbb43958, 0x3ee24dd3, 0x42b1c28f, 0x42f06666,
+    0x45d379c3, 0x44637148, 0x3cbbecab, 0x4113edfa, 0x444b22f2, 0x1fd93a96, 0x9921055f, 0xff626925,
 ];
 
 pub const VALUES_64: &[u64] = &[
@@ -117,4 +152,20 @@ pub const VALUES_64: &[u64] = &[
     0x800123456789abcd,
     0x800fffffffffffff,
     0x8000000000000001,
+    0x400921fb54442d18, // Pi.
+    0x3fd5555555555555, // Approximately 1/3.
+];
+
+/// Patterns that are used to construct the predicates for predicated intrinsics.
+pub const PRED_PATTERNS: &[[bool; 16]] = &[
+    [true; 16],
+    [false; 16],
+    [
+        true, false, true, false, true, false, true, false, true, false, true, false, true, false,
+        true, false,
+    ],
+    [
+        false, true, false, true, false, true, false, true, false, true, false, true, false, true,
+        false, true,
+    ],
 ];

From 7f8b9f60575dd610e9902731776552667bceeec9 Mon Sep 17 00:00:00 2001
From: Adam Gemmell <adam.gemmell@arm.com>
Date: Tue, 12 Dec 2023 15:35:48 +0000
Subject: [PATCH 5/6] Improve error messages for load/store tests, rename
 LdIntrCharacteristics

---
 .../src/aarch64/sve/ld_st_tests_sve.rs        | 100 +++++++++---
 .../src/aarch64/sve/ld_st_tests_sve2.rs       | 100 +++++++++---
 crates/stdarch-gen2/src/load_store_tests.rs   | 148 +++++++++---------
 3 files changed, 234 insertions(+), 114 deletions(-)

diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
index 1a221f8d62..8a8043b9f4 100644
--- a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
@@ -86,72 +86,132 @@ static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| {
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_f32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable f32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_f64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable f64 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(
+        svptest_first(svptrue_b8(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i8 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(
+        svptest_first(svptrue_b16(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i16 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i64 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(
+        svptest_first(svptrue_b8(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u8 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(
+        svptest_first(svptrue_b16(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u16 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u64 vector didn't match the expected value"
+    )
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_f32_with_svst1_f32() {
diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
index 3fc821ec54..dbb63e1bfd 100644
--- a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
@@ -86,72 +86,132 @@ static U64_DATA: LazyLock<[u64; 32 * 5]> = LazyLock::new(|| {
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_f32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable f32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_f64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable f64 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(
+        svptest_first(svptrue_b8(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i8 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(
+        svptest_first(svptrue_b16(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i16 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_s64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable i64 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(
+        svptest_first(svptrue_b8(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u8 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(
+        svptest_first(svptrue_b16(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u16 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(
+        svptest_first(svptrue_b32(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u32 vector didn't match the expected value"
+    )
 }
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(
+        svptest_first(svptrue_b64(), defined),
+        "The intrinsic under test appears to have loaded 0 elements"
+    );
     let cmp = svcmpne_u64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(
+        !svptest_any(defined, cmp),
+        "Scalable u64 vector didn't match the expected value"
+    )
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() {
diff --git a/crates/stdarch-gen2/src/load_store_tests.rs b/crates/stdarch-gen2/src/load_store_tests.rs
index d697a8d22d..d059bcee2d 100644
--- a/crates/stdarch-gen2/src/load_store_tests.rs
+++ b/crates/stdarch-gen2/src/load_store_tests.rs
@@ -123,13 +123,13 @@ fn generate_single_test(
     load: Intrinsic,
     store: Option<Intrinsic>,
 ) -> Result<proc_macro2::TokenStream, String> {
-    let chars = LdIntrCharacteristics::new(&load)?;
+    let props = LoadIntrinsicProps::new(&load)?;
     let fn_name = load.signature.fn_name().to_string();
 
-    if let Some(ty) = &chars.gather_bases_type {
+    if let Some(ty) = &props.gather_bases_type {
         if ty.base_type().unwrap().get_size() == Ok(32)
-            && chars.gather_index_type.is_none()
-            && chars.gather_offset_type.is_none()
+            && props.gather_index_type.is_none()
+            && props.gather_offset_type.is_none()
         {
             // We lack a way to ensure data is in the bottom 32 bits of the address space
             println!("Skipping test for {fn_name}");
@@ -154,7 +154,7 @@ fn generate_single_test(
         }
     );
 
-    let load_type = &chars.load_type;
+    let load_type = &props.load_type;
     let acle_type = load_type.acle_notation_repr();
 
     // If there's no return type, fall back to the load type for things that depend on it
@@ -176,8 +176,8 @@ fn generate_single_test(
     let assert_fn = format_ident!("assert_vector_matches_{rust_ret_type}");
 
     // Use vnum=1, so adjust all values by one vector length
-    let (length_call, vnum_arg) = if chars.vnum {
-        if chars.is_prf {
+    let (length_call, vnum_arg) = if props.vnum {
+        if props.is_prf {
             (quote!(), quote!(, 1))
         } else {
             (quote!(let len = #size_fn() as usize;), quote!(, 1))
@@ -186,14 +186,14 @@ fn generate_single_test(
         (quote!(), quote!())
     };
 
-    let (bases_load, bases_arg) = if let Some(ty) = &chars.gather_bases_type {
+    let (bases_load, bases_arg) = if let Some(ty) = &props.gather_bases_type {
         // Bases is a vector of (sometimes 32-bit) pointers
         // When we combine bases with an offset/index argument, we load from the data arrays
         // starting at 1
         let base_ty = ty.base_type().unwrap();
         let rust_type = format_ident!("{}", base_ty.rust_repr());
         let index_fn = format_ident!("svindex_{}", base_ty.acle_notation_repr());
-        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+        let size_in_bytes = props.load_type.get_size().unwrap() / 8;
 
         if base_ty.get_size().unwrap() == 32 {
             // Treat bases as a vector of offsets here - we don't test this without an offset or
@@ -210,7 +210,7 @@ fn generate_single_test(
             let data_array = if store.is_some() {
                 format_ident!("storage")
             } else {
-                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+                format_ident!("{}_DATA", props.load_type.rust_repr().to_uppercase())
             };
 
             let add_fn = format_ident!("svadd_{}_x", base_ty.acle_notation_repr());
@@ -227,9 +227,9 @@ fn generate_single_test(
         (None, quote!())
     };
 
-    let index_arg = if let Some(ty) = &chars.gather_index_type {
+    let index_arg = if let Some(ty) = &props.gather_index_type {
         let rust_type = format_ident!("{}", ty.rust_repr());
-        if chars
+        if props
             .gather_bases_type
             .as_ref()
             .and_then(TypeKind::base_type)
@@ -241,9 +241,9 @@ fn generate_single_test(
             let data_array = if store.is_some() {
                 format_ident!("storage")
             } else {
-                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+                format_ident!("{}_DATA", props.load_type.rust_repr().to_uppercase())
             };
-            let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+            let size_in_bytes = props.load_type.get_size().unwrap() / 8;
             quote!(, #data_array.as_ptr() as #rust_type / (#size_in_bytes as #rust_type) + 1)
         } else {
             quote!(, 1.try_into().unwrap())
@@ -252,9 +252,9 @@ fn generate_single_test(
         quote!()
     };
 
-    let offset_arg = if let Some(ty) = &chars.gather_offset_type {
-        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
-        if chars
+    let offset_arg = if let Some(ty) = &props.gather_offset_type {
+        let size_in_bytes = props.load_type.get_size().unwrap() / 8;
+        if props
             .gather_bases_type
             .as_ref()
             .and_then(TypeKind::base_type)
@@ -267,7 +267,7 @@ fn generate_single_test(
             let data_array = if store.is_some() {
                 format_ident!("storage")
             } else {
-                format_ident!("{}_DATA", chars.load_type.rust_repr().to_uppercase())
+                format_ident!("{}_DATA", props.load_type.rust_repr().to_uppercase())
             };
             quote!(, #data_array.as_ptr() as #rust_type + #size_in_bytes as #rust_type)
         } else {
@@ -277,11 +277,11 @@ fn generate_single_test(
         quote!()
     };
 
-    let (offsets_load, offsets_arg) = if let Some(ty) = &chars.gather_offsets_type {
+    let (offsets_load, offsets_arg) = if let Some(ty) = &props.gather_offsets_type {
         // Offsets is a scalable vector of per-element offsets in bytes. We re-use the contiguous
         // data for this, then multiply to get indices
         let offsets_fn = format_ident!("svindex_{}", ty.base_type().unwrap().acle_notation_repr());
-        let size_in_bytes = chars.load_type.get_size().unwrap() / 8;
+        let size_in_bytes = props.load_type.get_size().unwrap() / 8;
         (
             Some(quote! {
                 let offsets = #offsets_fn(0, #size_in_bytes.try_into().unwrap());
@@ -292,7 +292,7 @@ fn generate_single_test(
         (None, quote!())
     };
 
-    let (indices_load, indices_arg) = if let Some(ty) = &chars.gather_indices_type {
+    let (indices_load, indices_arg) = if let Some(ty) = &props.gather_indices_type {
         // There's no need to multiply indices by the load type width
         let base_ty = ty.base_type().unwrap();
         let indices_fn = format_ident!("svindex_{}", base_ty.acle_notation_repr());
@@ -306,21 +306,21 @@ fn generate_single_test(
         (None, quote!())
     };
 
-    let ptr = if chars.gather_bases_type.is_some() {
+    let ptr = if props.gather_bases_type.is_some() {
         quote!()
-    } else if chars.is_prf {
+    } else if props.is_prf {
         quote!(, I64_DATA.as_ptr())
     } else {
         quote!(, #data_array.as_ptr())
     };
 
-    let tuple_len = &chars.tuple_len;
-    let expecteds = if chars.is_prf {
+    let tuple_len = &props.tuple_len;
+    let expecteds = if props.is_prf {
         // No return value for prefetches
         vec![]
     } else {
         (0..*tuple_len)
-            .map(|i| get_expected_range(i, &chars))
+            .map(|i| get_expected_range(i, &props))
             .collect()
     };
     let asserts: Vec<_> =
@@ -336,7 +336,7 @@ fn generate_single_test(
                 .collect()
         };
 
-    let function = if chars.is_prf {
+    let function = if props.is_prf {
         if fn_name.contains("gather") && fn_name.contains("base") && !fn_name.starts_with("svprf_")
         {
             // svprf(b|h|w|d)_gather base intrinsics do not have a generic type parameter
@@ -348,7 +348,7 @@ fn generate_single_test(
         quote!(#fn_ident)
     };
 
-    let octaword_guard = if chars.replicate_width == Some(256) {
+    let octaword_guard = if props.replicate_width == Some(256) {
         let msg = format!("Skipping {test_name} due to SVE vector length");
         quote! {
             if svcntb() < 32 {
@@ -369,7 +369,7 @@ fn generate_single_test(
             let create = format_ident!("svcreate{tuple_len}_{acle_type}");
             quote!(#create(#(#expecteds),*))
         };
-        let input = store.input.types.get(0).unwrap().get(0).unwrap();
+        let input = store.input.types.first().unwrap().get(0).unwrap();
         let store_type = input
             .get(store.test.get_typeset_index().unwrap())
             .and_then(InputType::typekind)
@@ -377,10 +377,10 @@ fn generate_single_test(
             .unwrap();
 
         let store_type = format_ident!("{}", store_type.rust_repr());
-        let storage_len = NUM_VECS * VL_MAX_BITS / chars.load_type.get_size()? as usize;
+        let storage_len = NUM_VECS * VL_MAX_BITS / props.load_type.get_size()? as usize;
         let store_fn = format_ident!("{}", store.signature.fn_name().to_string());
-        let load_type = format_ident!("{}", chars.load_type.rust_repr());
-        let (store_ptr, store_mut_ptr) = if chars.gather_bases_type.is_none() {
+        let load_type = format_ident!("{}", props.load_type.rust_repr());
+        let (store_ptr, store_mut_ptr) = if props.gather_bases_type.is_none() {
             (
                 quote!(, storage.as_ptr() as *const #load_type),
                 quote!(, storage.as_mut_ptr()),
@@ -389,7 +389,7 @@ fn generate_single_test(
             (quote!(), quote!())
         };
         let args = quote!(#pred_fn() #store_ptr #vnum_arg #bases_arg #offset_arg #index_arg #offsets_arg #indices_arg);
-        let call = if chars.uses_ffr {
+        let call = if props.uses_ffr {
             // Doing a normal load first maximises the number of elements our ff/nf test loads
             let non_ffr_fn_name = format_ident!(
                 "{}",
@@ -433,7 +433,7 @@ fn generate_single_test(
         })
     } else {
         let args = quote!(#pred_fn() #ptr #vnum_arg #bases_arg #offset_arg #index_arg #offsets_arg #indices_arg);
-        let call = if chars.uses_ffr {
+        let call = if props.uses_ffr {
             // Doing a normal load first maximises the number of elements our ff/nf test loads
             let non_ffr_fn_name = format_ident!(
                 "{}",
@@ -469,28 +469,28 @@ fn generate_single_test(
     }
 }
 
-/// Assumes chars.ret_type is not None
-fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_macro2::TokenStream {
+/// Assumes props.ret_type is not None
+fn get_expected_range(tuple_idx: usize, props: &LoadIntrinsicProps) -> proc_macro2::TokenStream {
     // vnum=1
-    let vnum_adjust = if chars.vnum { quote!(len+) } else { quote!() };
+    let vnum_adjust = if props.vnum { quote!(len+) } else { quote!() };
 
     let bases_adjust =
-        (chars.gather_index_type.is_some() || chars.gather_offset_type.is_some()) as usize;
+        (props.gather_index_type.is_some() || props.gather_offset_type.is_some()) as usize;
 
-    let tuple_len = chars.tuple_len;
-    let size = chars
+    let tuple_len = props.tuple_len;
+    let size = props
         .ret_type
         .as_ref()
         .and_then(TypeKind::base_type)
-        .unwrap_or(&chars.load_type)
+        .unwrap_or(&props.load_type)
         .get_size()
         .unwrap() as usize;
 
-    if chars.replicate_width == Some(128) {
+    if props.replicate_width == Some(128) {
         // svld1rq
         let ty_rust = format_ident!(
             "{}",
-            chars
+            props
                 .ret_type
                 .as_ref()
                 .unwrap()
@@ -501,14 +501,14 @@ fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_m
         let args: Vec<_> = (0..(128 / size)).map(|i| quote!(#i as #ty_rust)).collect();
         let dup = format_ident!(
             "svdupq_n_{}",
-            chars.ret_type.as_ref().unwrap().acle_notation_repr()
+            props.ret_type.as_ref().unwrap().acle_notation_repr()
         );
         quote!(#dup(#(#args,)*))
-    } else if chars.replicate_width == Some(256) {
+    } else if props.replicate_width == Some(256) {
         // svld1ro - we use two interleaved svdups to create a repeating 256-bit pattern
         let ty_rust = format_ident!(
             "{}",
-            chars
+            props
                 .ret_type
                 .as_ref()
                 .unwrap()
@@ -516,7 +516,7 @@ fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_m
                 .unwrap()
                 .rust_repr()
         );
-        let ret_acle = chars.ret_type.as_ref().unwrap().acle_notation_repr();
+        let ret_acle = props.ret_type.as_ref().unwrap().acle_notation_repr();
         let args: Vec<_> = (0..(128 / size)).map(|i| quote!(#i as #ty_rust)).collect();
         let args2: Vec<_> = ((128 / size)..(256 / size))
             .map(|i| quote!(#i as #ty_rust))
@@ -526,7 +526,7 @@ fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_m
         quote!(#interleave(#dup(#(#args,)*), #dup(#(#args2,)*)))
     } else {
         let start = bases_adjust + tuple_idx;
-        if chars
+        if props
             .ret_type
             .as_ref()
             .unwrap()
@@ -540,14 +540,14 @@ fn get_expected_range(tuple_idx: usize, chars: &LdIntrCharacteristics) -> proc_m
             let svindex_fn = format_ident!("svindex_s{size}");
             quote! { #cvt_fn(#pred_fn(), #svindex_fn((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))}
         } else {
-            let ret_acle = chars.ret_type.as_ref().unwrap().acle_notation_repr();
+            let ret_acle = props.ret_type.as_ref().unwrap().acle_notation_repr();
             let svindex = format_ident!("svindex_{ret_acle}");
             quote!(#svindex((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))
         }
     }
 }
 
-struct LdIntrCharacteristics {
+struct LoadIntrinsicProps {
     // The data type to load from (not necessarily the data type returned)
     load_type: BaseType,
     // The data type to return (None for unit)
@@ -574,9 +574,9 @@ struct LdIntrCharacteristics {
     gather_indices_type: Option<TypeKind>,
 }
 
-impl LdIntrCharacteristics {
-    fn new(intr: &Intrinsic) -> Result<LdIntrCharacteristics, String> {
-        let input = intr.input.types.get(0).unwrap().get(0).unwrap();
+impl LoadIntrinsicProps {
+    fn new(intr: &Intrinsic) -> Result<LoadIntrinsicProps, String> {
+        let input = intr.input.types.first().unwrap().get(0).unwrap();
         let load_type = input
             .get(intr.test.get_typeset_index().unwrap())
             .and_then(InputType::typekind)
@@ -618,7 +618,7 @@ impl LdIntrCharacteristics {
         let gather_offsets_type = get_ty_of_arg("offsets");
         let gather_indices_type = get_ty_of_arg("indices");
 
-        Ok(LdIntrCharacteristics {
+        Ok(LoadIntrinsicProps {
             load_type: *load_type,
             ret_type,
             tuple_len,
@@ -720,81 +720,81 @@ static U64_DATA: LazyLock<[u64; {LEN_U64} * {NUM_VECS}]> = LazyLock::new(|| {{
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f32(vector: svfloat32_t, expected: svfloat32_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(svptest_first(svptrue_b32(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_f32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable f32 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_f64(vector: svfloat64_t, expected: svfloat64_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(svptest_first(svptrue_b64(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_f64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable f64 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i8(vector: svint8_t, expected: svint8_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(svptest_first(svptrue_b8(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_s8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable i8 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i16(vector: svint16_t, expected: svint16_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(svptest_first(svptrue_b16(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_s16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable i16 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i32(vector: svint32_t, expected: svint32_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(svptest_first(svptrue_b32(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_s32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable i32 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_i64(vector: svint64_t, expected: svint64_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(svptest_first(svptrue_b64(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_s64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable i64 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u8(vector: svuint8_t, expected: svuint8_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b8(), defined));
+    assert!(svptest_first(svptrue_b8(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_u8(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable u8 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u16(vector: svuint16_t, expected: svuint16_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b16(), defined));
+    assert!(svptest_first(svptrue_b16(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_u16(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable u16 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u32(vector: svuint32_t, expected: svuint32_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b32(), defined));
+    assert!(svptest_first(svptrue_b32(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_u32(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable u32 vector didn't match the expected value")
 }}
 
 #[target_feature(enable = "sve")]
 fn assert_vector_matches_u64(vector: svuint64_t, expected: svuint64_t) {{
     let defined = svrdffr();
-    assert!(svptest_first(svptrue_b64(), defined));
+    assert!(svptest_first(svptrue_b64(), defined), "The intrinsic under test appears to have loaded 0 elements");
     let cmp = svcmpne_u64(defined, vector, expected);
-    assert!(!svptest_any(defined, cmp))
+    assert!(!svptest_any(defined, cmp), "Scalable u64 vector didn't match the expected value")
 }}
 "#
     );

From 8730686dfe7ffcf6d70adcba4486dc3b176ac502 Mon Sep 17 00:00:00 2001
From: Adam Gemmell <adam.gemmell@arm.com>
Date: Tue, 12 Dec 2023 15:37:15 +0000
Subject: [PATCH 6/6] Fix load/store tests for 2048-bit vector lengths

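This patch pairs a CI change (running under QEMU with
sve-default-vector-length=256, i.e. a 256-byte / 2048-bit vector) with a
regeneration of the load/store tests so they keep passing at that length.
As far as the regenerated code shows, the issue is that at this vector
length svcntb() returns 256, which does not fit in i8 or u8, so the
previous `.try_into().unwrap()` starts passed to svindex_* would panic;
the tests now use plain `as` casts instead, which wrap the same way as
the existing `val == i as i8` style comparisons in the verification
loops. A minimal standalone sketch of the failure mode (hypothetical
values, not part of the generated tests):

    fn main() {
        // svcntb() reports the vector length in bytes; at a 2048-bit
        // vector length it returns 256.
        let len: usize = 256;
        // Old generated pattern: converting 256usize to i8 fails, so
        // `.try_into().unwrap()` would panic at this vector length.
        assert!(i8::try_from(len).is_err());
        // New pattern: a plain `as` cast wraps to 0, which lines up with
        // the modulo-256 element values the i8 tests compare against.
        assert_eq!(len as i8, 0);
    }
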
---
 .../aarch64-unknown-linux-gnu/Dockerfile      |    2 +-
 .../src/aarch64/sve/ld_st_tests_sve.rs        | 2552 ++++++-----------
 .../src/aarch64/sve/ld_st_tests_sve2.rs       |  448 +--
 crates/stdarch-gen2/src/load_store_tests.rs   |   11 +-
 4 files changed, 1079 insertions(+), 1934 deletions(-)

diff --git a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
index 87cec1e394..2a4beba345 100644
--- a/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
+++ b/ci/docker/aarch64-unknown-linux-gnu/Dockerfile
@@ -14,5 +14,5 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
   lld
 
 ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc \
-    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64 -cpu max,sve512=on -L /usr/aarch64-linux-gnu" \
+    CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_RUNNER="qemu-aarch64 -cpu max,sve-default-vector-length=256 -L /usr/aarch64-linux-gnu" \
     OBJDUMP=aarch64-linux-gnu-objdump
diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
index 8a8043b9f4..0c3fb343cb 100644
--- a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve.rs
@@ -258,7 +258,7 @@ unsafe fn test_svld1_f64_with_svst1_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_s8_with_svst1_s8() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s8((0usize) as i8, 1usize.try_into().unwrap());
     svst1_s8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -267,13 +267,13 @@ unsafe fn test_svld1_s8_with_svst1_s8() {
     let loaded = svld1_s8(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i8(
         loaded,
-        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_s16_with_svst1_s16() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s16((0usize) as i16, 1usize.try_into().unwrap());
     svst1_s16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -282,13 +282,13 @@ unsafe fn test_svld1_s16_with_svst1_s16() {
     let loaded = svld1_s16(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_s32_with_svst1_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svst1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -297,13 +297,13 @@ unsafe fn test_svld1_s32_with_svst1_s32() {
     let loaded = svld1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_s64_with_svst1_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1_s64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i64 || val == i as i64);
@@ -312,13 +312,13 @@ unsafe fn test_svld1_s64_with_svst1_s64() {
     let loaded = svld1_s64(svptrue_b64(), storage.as_ptr() as *const i64);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_u8_with_svst1_u8() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u8((0usize) as u8, 1usize.try_into().unwrap());
     svst1_u8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -327,13 +327,13 @@ unsafe fn test_svld1_u8_with_svst1_u8() {
     let loaded = svld1_u8(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u8(
         loaded,
-        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_u16_with_svst1_u16() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u16((0usize) as u16, 1usize.try_into().unwrap());
     svst1_u16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -342,13 +342,13 @@ unsafe fn test_svld1_u16_with_svst1_u16() {
     let loaded = svld1_u16(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_u32_with_svst1_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svst1_u32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -357,13 +357,13 @@ unsafe fn test_svld1_u32_with_svst1_u32() {
     let loaded = svld1_u32(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_u64_with_svst1_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1_u64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u64 || val == i as u64);
@@ -372,7 +372,7 @@ unsafe fn test_svld1_u64_with_svst1_u64() {
     let loaded = svld1_u64(svptrue_b64(), storage.as_ptr() as *const u64);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -400,7 +400,7 @@ unsafe fn test_svld1_gather_s32index_f32_with_svst1_scatter_s32index_f32() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1_scatter_s32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -410,13 +410,13 @@ unsafe fn test_svld1_gather_s32index_s32_with_svst1_scatter_s32index_s32() {
     let loaded = svld1_gather_s32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1_scatter_s32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -426,7 +426,7 @@ unsafe fn test_svld1_gather_s32index_u32_with_svst1_scatter_s32index_u32() {
     let loaded = svld1_gather_s32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -454,7 +454,7 @@ unsafe fn test_svld1_gather_s64index_f64_with_svst1_scatter_s64index_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -464,13 +464,13 @@ unsafe fn test_svld1_gather_s64index_s64_with_svst1_scatter_s64index_s64() {
     let loaded = svld1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -480,7 +480,7 @@ unsafe fn test_svld1_gather_s64index_u64_with_svst1_scatter_s64index_u64() {
     let loaded = svld1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -508,7 +508,7 @@ unsafe fn test_svld1_gather_u32index_f32_with_svst1_scatter_u32index_f32() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1_scatter_u32index_s32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -518,13 +518,13 @@ unsafe fn test_svld1_gather_u32index_s32_with_svst1_scatter_u32index_s32() {
     let loaded = svld1_gather_u32index_s32(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1_scatter_u32index_u32(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -534,7 +534,7 @@ unsafe fn test_svld1_gather_u32index_u32_with_svst1_scatter_u32index_u32() {
     let loaded = svld1_gather_u32index_u32(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -562,7 +562,7 @@ unsafe fn test_svld1_gather_u64index_f64_with_svst1_scatter_u64index_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -572,13 +572,13 @@ unsafe fn test_svld1_gather_u64index_s64_with_svst1_scatter_u64index_s64() {
     let loaded = svld1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -588,7 +588,7 @@ unsafe fn test_svld1_gather_u64index_u64_with_svst1_scatter_u64index_u64() {
     let loaded = svld1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -616,7 +616,7 @@ unsafe fn test_svld1_gather_s32offset_f32_with_svst1_scatter_s32offset_f32() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 4u32.try_into().unwrap());
     svst1_scatter_s32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -626,13 +626,13 @@ unsafe fn test_svld1_gather_s32offset_s32_with_svst1_scatter_s32offset_s32() {
     let loaded = svld1_gather_s32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 4u32.try_into().unwrap());
     svst1_scatter_s32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -642,7 +642,7 @@ unsafe fn test_svld1_gather_s32offset_u32_with_svst1_scatter_s32offset_u32() {
     let loaded = svld1_gather_s32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -670,7 +670,7 @@ unsafe fn test_svld1_gather_s64offset_f64_with_svst1_scatter_s64offset_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 8u32.try_into().unwrap());
     svst1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -680,13 +680,13 @@ unsafe fn test_svld1_gather_s64offset_s64_with_svst1_scatter_s64offset_s64() {
     let loaded = svld1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 8u32.try_into().unwrap());
     svst1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -696,7 +696,7 @@ unsafe fn test_svld1_gather_s64offset_u64_with_svst1_scatter_s64offset_u64() {
     let loaded = svld1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -724,7 +724,7 @@ unsafe fn test_svld1_gather_u32offset_f32_with_svst1_scatter_u32offset_f32() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -734,13 +734,13 @@ unsafe fn test_svld1_gather_u32offset_s32_with_svst1_scatter_u32offset_s32() {
     let loaded = svld1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -750,7 +750,7 @@ unsafe fn test_svld1_gather_u32offset_u32_with_svst1_scatter_u32offset_u32() {
     let loaded = svld1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -778,7 +778,7 @@ unsafe fn test_svld1_gather_u64offset_f64_with_svst1_scatter_u64offset_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     svst1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -788,13 +788,13 @@ unsafe fn test_svld1_gather_u64offset_s64_with_svst1_scatter_u64offset_s64() {
     let loaded = svld1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     svst1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -804,7 +804,7 @@ unsafe fn test_svld1_gather_u64offset_u64_with_svst1_scatter_u64offset_u64() {
     let loaded = svld1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -834,7 +834,7 @@ unsafe fn test_svld1_gather_u64base_f64_with_svst1_scatter_u64base_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -846,13 +846,13 @@ unsafe fn test_svld1_gather_u64base_s64_with_svst1_scatter_u64base_s64() {
     let loaded = svld1_gather_u64base_s64(svptrue_b64(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -864,7 +864,7 @@ unsafe fn test_svld1_gather_u64base_u64_with_svst1_scatter_u64base_u64() {
     let loaded = svld1_gather_u64base_u64(svptrue_b64(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -901,7 +901,7 @@ unsafe fn test_svld1_gather_u32base_index_f32_with_svst1_scatter_u32base_index_f
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32base_index_s32(
         svptrue_b32(),
@@ -920,13 +920,13 @@ unsafe fn test_svld1_gather_u32base_index_s32_with_svst1_scatter_u32base_index_s
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32base_index_u32(
         svptrue_b32(),
@@ -945,7 +945,7 @@ unsafe fn test_svld1_gather_u32base_index_u32_with_svst1_scatter_u32base_index_u
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -975,7 +975,7 @@ unsafe fn test_svld1_gather_u64base_index_f64_with_svst1_scatter_u64base_index_f
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -987,13 +987,13 @@ unsafe fn test_svld1_gather_u64base_index_s64_with_svst1_scatter_u64base_index_s
     let loaded = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -1005,7 +1005,7 @@ unsafe fn test_svld1_gather_u64base_index_u64_with_svst1_scatter_u64base_index_u
     let loaded = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -1042,7 +1042,7 @@ unsafe fn test_svld1_gather_u32base_offset_f32_with_svst1_scatter_u32base_offset
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32base_offset_s32(
         svptrue_b32(),
@@ -1061,13 +1061,13 @@ unsafe fn test_svld1_gather_u32base_offset_s32_with_svst1_scatter_u32base_offset
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svst1_scatter_u32base_offset_u32(
         svptrue_b32(),
@@ -1086,7 +1086,7 @@ unsafe fn test_svld1_gather_u32base_offset_u32_with_svst1_scatter_u32base_offset
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -1116,7 +1116,7 @@ unsafe fn test_svld1_gather_u64base_offset_f64_with_svst1_scatter_u64base_offset
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -1128,13 +1128,13 @@ unsafe fn test_svld1_gather_u64base_offset_s64_with_svst1_scatter_u64base_offset
     let loaded = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -1146,7 +1146,7 @@ unsafe fn test_svld1_gather_u64base_offset_u64_with_svst1_scatter_u64base_offset
     let loaded = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -1209,10 +1209,7 @@ unsafe fn test_svld1_vnum_f64_with_svst1_vnum_f64() {
 unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() {
     let len = svcntb() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s8(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap());
     svst1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -1221,20 +1218,14 @@ unsafe fn test_svld1_vnum_s8_with_svst1_vnum_s8() {
     let loaded = svld1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i8(
         loaded,
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap());
     svst1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -1243,20 +1234,14 @@ unsafe fn test_svld1_vnum_s16_with_svst1_vnum_s16() {
     let loaded = svld1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svst1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -1265,20 +1250,14 @@ unsafe fn test_svld1_vnum_s32_with_svst1_vnum_s32() {
     let loaded = svld1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i64 || val == i as i64);
@@ -1287,20 +1266,14 @@ unsafe fn test_svld1_vnum_s64_with_svst1_vnum_s64() {
     let loaded = svld1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() {
     let len = svcntb() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u8(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap());
     svst1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -1309,20 +1282,14 @@ unsafe fn test_svld1_vnum_u8_with_svst1_vnum_u8() {
     let loaded = svld1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u8(
         loaded,
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap());
     svst1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -1331,20 +1298,14 @@ unsafe fn test_svld1_vnum_u16_with_svst1_vnum_u16() {
     let loaded = svld1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svst1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -1353,20 +1314,14 @@ unsafe fn test_svld1_vnum_u32_with_svst1_vnum_u32() {
     let loaded = svld1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u64 || val == i as u64);
@@ -1375,10 +1330,7 @@ unsafe fn test_svld1_vnum_u64_with_svst1_vnum_u64() {
     let loaded = svld1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,f64mm")]
@@ -1781,7 +1733,7 @@ unsafe fn test_svld1rq_u64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 1u32.try_into().unwrap());
     svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1791,13 +1743,13 @@ unsafe fn test_svld1sb_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32()
     let loaded = svld1sb_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 2u32.try_into().unwrap());
     svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1808,13 +1760,13 @@ unsafe fn test_svld1sh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32()
         svld1sh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 1u32.try_into().unwrap());
     svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1824,13 +1776,13 @@ unsafe fn test_svld1sb_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32()
     let loaded = svld1sb_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 2u32.try_into().unwrap());
     svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1841,13 +1793,13 @@ unsafe fn test_svld1sh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32()
         svld1sh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1857,13 +1809,13 @@ unsafe fn test_svld1sb_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64()
     let loaded = svld1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1874,13 +1826,13 @@ unsafe fn test_svld1sh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64()
         svld1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1891,13 +1843,13 @@ unsafe fn test_svld1sw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64()
         svld1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1907,13 +1859,13 @@ unsafe fn test_svld1sb_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64()
     let loaded = svld1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1924,13 +1876,13 @@ unsafe fn test_svld1sh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64()
         svld1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1941,13 +1893,13 @@ unsafe fn test_svld1sw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64()
         svld1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1957,13 +1909,13 @@ unsafe fn test_svld1sb_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32()
     let loaded = svld1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1974,13 +1926,13 @@ unsafe fn test_svld1sh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32()
         svld1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1990,13 +1942,13 @@ unsafe fn test_svld1sb_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32()
     let loaded = svld1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2007,13 +1959,13 @@ unsafe fn test_svld1sh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32()
         svld1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2023,13 +1975,13 @@ unsafe fn test_svld1sb_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64()
     let loaded = svld1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2040,13 +1992,13 @@ unsafe fn test_svld1sh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64()
         svld1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2057,13 +2009,13 @@ unsafe fn test_svld1sw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64()
         svld1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2073,13 +2025,13 @@ unsafe fn test_svld1sb_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64()
     let loaded = svld1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2090,13 +2042,13 @@ unsafe fn test_svld1sh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64()
         svld1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2107,13 +2059,13 @@ unsafe fn test_svld1sw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64()
         svld1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32base_offset_s32(
         svptrue_b8(),
@@ -2132,13 +2084,13 @@ unsafe fn test_svld1sb_gather_u32base_offset_s32_with_svst1b_scatter_u32base_off
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_offset_s32(
         svptrue_b16(),
@@ -2157,13 +2109,13 @@ unsafe fn test_svld1sh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_off
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32base_offset_u32(
         svptrue_b8(),
@@ -2182,13 +2134,13 @@ unsafe fn test_svld1sb_gather_u32base_offset_u32_with_svst1b_scatter_u32base_off
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_offset_u32(
         svptrue_b16(),
@@ -2207,13 +2159,13 @@ unsafe fn test_svld1sh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_off
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2225,13 +2177,13 @@ unsafe fn test_svld1sb_gather_u64base_offset_s64_with_svst1b_scatter_u64base_off
     let loaded = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2243,13 +2195,13 @@ unsafe fn test_svld1sh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_off
     let loaded = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2261,13 +2213,13 @@ unsafe fn test_svld1sw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_off
     let loaded = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2279,13 +2231,13 @@ unsafe fn test_svld1sb_gather_u64base_offset_u64_with_svst1b_scatter_u64base_off
     let loaded = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2297,13 +2249,13 @@ unsafe fn test_svld1sh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_off
     let loaded = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2315,13 +2267,13 @@ unsafe fn test_svld1sw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_off
     let loaded = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2333,13 +2285,13 @@ unsafe fn test_svld1sb_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
     let loaded = svld1sb_gather_u64base_s64(svptrue_b8(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2351,13 +2303,13 @@ unsafe fn test_svld1sh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
     let loaded = svld1sh_gather_u64base_s64(svptrue_b16(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2369,13 +2321,13 @@ unsafe fn test_svld1sw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
     let loaded = svld1sw_gather_u64base_s64(svptrue_b32(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2387,13 +2339,13 @@ unsafe fn test_svld1sb_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
     let loaded = svld1sb_gather_u64base_u64(svptrue_b8(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2405,13 +2357,13 @@ unsafe fn test_svld1sh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
     let loaded = svld1sh_gather_u64base_u64(svptrue_b16(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2423,13 +2375,13 @@ unsafe fn test_svld1sw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
     let loaded = svld1sw_gather_u64base_u64(svptrue_b32(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_s16_with_svst1b_s16() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s16((0usize) as i16, 1usize.try_into().unwrap());
     svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2438,13 +2390,13 @@ unsafe fn test_svld1sb_s16_with_svst1b_s16() {
     let loaded = svld1sb_s16(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_s32_with_svst1b_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2453,13 +2405,13 @@ unsafe fn test_svld1sb_s32_with_svst1b_s32() {
     let loaded = svld1sb_s32(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_s32_with_svst1h_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -2468,13 +2420,13 @@ unsafe fn test_svld1sh_s32_with_svst1h_s32() {
     let loaded = svld1sh_s32(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_s64_with_svst1b_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2483,13 +2435,13 @@ unsafe fn test_svld1sb_s64_with_svst1b_s64() {
     let loaded = svld1sb_s64(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_s64_with_svst1h_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -2498,13 +2450,13 @@ unsafe fn test_svld1sh_s64_with_svst1h_s64() {
     let loaded = svld1sh_s64(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_s64_with_svst1w_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -2513,13 +2465,13 @@ unsafe fn test_svld1sw_s64_with_svst1w_s64() {
     let loaded = svld1sw_s64(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_u16_with_svst1b_u16() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u16((0usize) as u16, 1usize.try_into().unwrap());
     svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2528,13 +2480,13 @@ unsafe fn test_svld1sb_u16_with_svst1b_u16() {
     let loaded = svld1sb_u16(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_u32_with_svst1b_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2543,13 +2495,13 @@ unsafe fn test_svld1sb_u32_with_svst1b_u32() {
     let loaded = svld1sb_u32(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_u32_with_svst1h_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -2558,13 +2510,13 @@ unsafe fn test_svld1sh_u32_with_svst1h_u32() {
     let loaded = svld1sh_u32(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_u64_with_svst1b_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2573,13 +2525,13 @@ unsafe fn test_svld1sb_u64_with_svst1b_u64() {
     let loaded = svld1sb_u64(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_u64_with_svst1h_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -2588,13 +2540,13 @@ unsafe fn test_svld1sh_u64_with_svst1h_u64() {
     let loaded = svld1sh_u64(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_u64_with_svst1w_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -2603,17 +2555,14 @@ unsafe fn test_svld1sw_u64_with_svst1w_u64() {
     let loaded = svld1sw_u64(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap());
     svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2622,20 +2571,14 @@ unsafe fn test_svld1sb_vnum_s16_with_svst1b_vnum_s16() {
     let loaded = svld1sb_vnum_s16(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2644,20 +2587,14 @@ unsafe fn test_svld1sb_vnum_s32_with_svst1b_vnum_s32() {
     let loaded = svld1sb_vnum_s32(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -2666,20 +2603,14 @@ unsafe fn test_svld1sh_vnum_s32_with_svst1h_vnum_s32() {
     let loaded = svld1sh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -2688,20 +2619,14 @@ unsafe fn test_svld1sb_vnum_s64_with_svst1b_vnum_s64() {
     let loaded = svld1sb_vnum_s64(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -2710,20 +2635,14 @@ unsafe fn test_svld1sh_vnum_s64_with_svst1h_vnum_s64() {
     let loaded = svld1sh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -2732,20 +2651,14 @@ unsafe fn test_svld1sw_vnum_s64_with_svst1w_vnum_s64() {
     let loaded = svld1sw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap());
     svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2754,20 +2667,14 @@ unsafe fn test_svld1sb_vnum_u16_with_svst1b_vnum_u16() {
     let loaded = svld1sb_vnum_u16(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2776,20 +2683,14 @@ unsafe fn test_svld1sb_vnum_u32_with_svst1b_vnum_u32() {
     let loaded = svld1sb_vnum_u32(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -2798,20 +2699,14 @@ unsafe fn test_svld1sh_vnum_u32_with_svst1h_vnum_u32() {
     let loaded = svld1sh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -2820,20 +2715,14 @@ unsafe fn test_svld1sb_vnum_u64_with_svst1b_vnum_u64() {
     let loaded = svld1sb_vnum_u64(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -2842,20 +2731,14 @@ unsafe fn test_svld1sh_vnum_u64_with_svst1h_vnum_u64() {
     let loaded = svld1sh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -2864,16 +2747,13 @@ unsafe fn test_svld1sw_vnum_u64_with_svst1w_vnum_u64() {
     let loaded = svld1sw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2884,13 +2764,13 @@ unsafe fn test_svld1sh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
         svld1sh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2901,13 +2781,13 @@ unsafe fn test_svld1sh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
         svld1sh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2918,13 +2798,13 @@ unsafe fn test_svld1sh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
         svld1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2935,13 +2815,13 @@ unsafe fn test_svld1sw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
         svld1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2952,13 +2832,13 @@ unsafe fn test_svld1sh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
         svld1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2969,13 +2849,13 @@ unsafe fn test_svld1sw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
         svld1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2986,13 +2866,13 @@ unsafe fn test_svld1sh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
         svld1sh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3003,13 +2883,13 @@ unsafe fn test_svld1sh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
         svld1sh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3020,13 +2900,13 @@ unsafe fn test_svld1sh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
         svld1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3037,13 +2917,13 @@ unsafe fn test_svld1sw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
         svld1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3054,13 +2934,13 @@ unsafe fn test_svld1sh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
         svld1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3071,13 +2951,13 @@ unsafe fn test_svld1sw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
         svld1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_index_s32(
         svptrue_b16(),
@@ -3096,13 +2976,13 @@ unsafe fn test_svld1sh_gather_u32base_index_s32_with_svst1h_scatter_u32base_inde
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_index_u32(
         svptrue_b16(),
@@ -3121,13 +3001,13 @@ unsafe fn test_svld1sh_gather_u32base_index_u32_with_svst1h_scatter_u32base_inde
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3139,13 +3019,13 @@ unsafe fn test_svld1sh_gather_u64base_index_s64_with_svst1h_scatter_u64base_inde
     let loaded = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3157,13 +3037,13 @@ unsafe fn test_svld1sw_gather_u64base_index_s64_with_svst1w_scatter_u64base_inde
     let loaded = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3175,13 +3055,13 @@ unsafe fn test_svld1sh_gather_u64base_index_u64_with_svst1h_scatter_u64base_inde
     let loaded = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3193,13 +3073,13 @@ unsafe fn test_svld1sw_gather_u64base_index_u64_with_svst1w_scatter_u64base_inde
     let loaded = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 1u32.try_into().unwrap());
     svst1b_scatter_s32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3209,13 +3089,13 @@ unsafe fn test_svld1ub_gather_s32offset_s32_with_svst1b_scatter_s32offset_s32()
     let loaded = svld1ub_gather_s32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 2u32.try_into().unwrap());
     svst1h_scatter_s32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3226,13 +3106,13 @@ unsafe fn test_svld1uh_gather_s32offset_s32_with_svst1h_scatter_s32offset_s32()
         svld1uh_gather_s32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 1u32.try_into().unwrap());
     svst1b_scatter_s32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3242,13 +3122,13 @@ unsafe fn test_svld1ub_gather_s32offset_u32_with_svst1b_scatter_s32offset_u32()
     let loaded = svld1ub_gather_s32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_s32(0, 2u32.try_into().unwrap());
     svst1h_scatter_s32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3259,13 +3139,13 @@ unsafe fn test_svld1uh_gather_s32offset_u32_with_svst1h_scatter_s32offset_u32()
         svld1uh_gather_s32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svst1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3275,13 +3155,13 @@ unsafe fn test_svld1ub_gather_s64offset_s64_with_svst1b_scatter_s64offset_s64()
     let loaded = svld1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svst1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3292,13 +3172,13 @@ unsafe fn test_svld1uh_gather_s64offset_s64_with_svst1h_scatter_s64offset_s64()
         svld1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svst1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3309,13 +3189,13 @@ unsafe fn test_svld1uw_gather_s64offset_s64_with_svst1w_scatter_s64offset_s64()
         svld1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svst1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3325,13 +3205,13 @@ unsafe fn test_svld1ub_gather_s64offset_u64_with_svst1b_scatter_s64offset_u64()
     let loaded = svld1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svst1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3342,13 +3222,13 @@ unsafe fn test_svld1uh_gather_s64offset_u64_with_svst1h_scatter_s64offset_u64()
         svld1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svst1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3359,13 +3239,13 @@ unsafe fn test_svld1uw_gather_s64offset_u64_with_svst1w_scatter_s64offset_u64()
         svld1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3375,13 +3255,13 @@ unsafe fn test_svld1ub_gather_u32offset_s32_with_svst1b_scatter_u32offset_s32()
     let loaded = svld1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3392,13 +3272,13 @@ unsafe fn test_svld1uh_gather_u32offset_s32_with_svst1h_scatter_u32offset_s32()
         svld1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3408,13 +3288,13 @@ unsafe fn test_svld1ub_gather_u32offset_u32_with_svst1b_scatter_u32offset_u32()
     let loaded = svld1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3425,13 +3305,13 @@ unsafe fn test_svld1uh_gather_u32offset_u32_with_svst1h_scatter_u32offset_u32()
         svld1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svst1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3441,13 +3321,13 @@ unsafe fn test_svld1ub_gather_u64offset_s64_with_svst1b_scatter_u64offset_s64()
     let loaded = svld1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svst1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3458,13 +3338,13 @@ unsafe fn test_svld1uh_gather_u64offset_s64_with_svst1h_scatter_u64offset_s64()
         svld1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svst1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3475,13 +3355,13 @@ unsafe fn test_svld1uw_gather_u64offset_s64_with_svst1w_scatter_u64offset_s64()
         svld1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svst1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3491,13 +3371,13 @@ unsafe fn test_svld1ub_gather_u64offset_u64_with_svst1b_scatter_u64offset_u64()
     let loaded = svld1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svst1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3508,13 +3388,13 @@ unsafe fn test_svld1uh_gather_u64offset_u64_with_svst1h_scatter_u64offset_u64()
         svld1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svst1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -3525,13 +3405,13 @@ unsafe fn test_svld1uw_gather_u64offset_u64_with_svst1w_scatter_u64offset_u64()
         svld1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32base_offset_s32(
         svptrue_b8(),
@@ -3550,13 +3430,13 @@ unsafe fn test_svld1ub_gather_u32base_offset_s32_with_svst1b_scatter_u32base_off
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_offset_s32(
         svptrue_b16(),
@@ -3575,13 +3455,13 @@ unsafe fn test_svld1uh_gather_u32base_offset_s32_with_svst1h_scatter_u32base_off
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_offset_u32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svst1b_scatter_u32base_offset_u32(
         svptrue_b8(),
@@ -3600,13 +3480,13 @@ unsafe fn test_svld1ub_gather_u32base_offset_u32_with_svst1b_scatter_u32base_off
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_offset_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_offset_u32(
         svptrue_b16(),
@@ -3625,13 +3505,13 @@ unsafe fn test_svld1uh_gather_u32base_offset_u32_with_svst1h_scatter_u32base_off
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -3643,13 +3523,13 @@ unsafe fn test_svld1ub_gather_u64base_offset_s64_with_svst1b_scatter_u64base_off
     let loaded = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3661,13 +3541,13 @@ unsafe fn test_svld1uh_gather_u64base_offset_s64_with_svst1h_scatter_u64base_off
     let loaded = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3679,13 +3559,13 @@ unsafe fn test_svld1uw_gather_u64base_offset_s64_with_svst1w_scatter_u64base_off
     let loaded = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_offset_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -3697,13 +3577,13 @@ unsafe fn test_svld1ub_gather_u64base_offset_u64_with_svst1b_scatter_u64base_off
     let loaded = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_offset_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3715,13 +3595,13 @@ unsafe fn test_svld1uh_gather_u64base_offset_u64_with_svst1h_scatter_u64base_off
     let loaded = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_offset_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3733,13 +3613,13 @@ unsafe fn test_svld1uw_gather_u64base_offset_u64_with_svst1w_scatter_u64base_off
     let loaded = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -3751,13 +3631,13 @@ unsafe fn test_svld1ub_gather_u64base_s64_with_svst1b_scatter_u64base_s64() {
     let loaded = svld1ub_gather_u64base_s64(svptrue_b8(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3769,13 +3649,13 @@ unsafe fn test_svld1uh_gather_u64base_s64_with_svst1h_scatter_u64base_s64() {
     let loaded = svld1uh_gather_u64base_s64(svptrue_b16(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3787,13 +3667,13 @@ unsafe fn test_svld1uw_gather_u64base_s64_with_svst1w_scatter_u64base_s64() {
     let loaded = svld1uw_gather_u64base_s64(svptrue_b32(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -3805,13 +3685,13 @@ unsafe fn test_svld1ub_gather_u64base_u64_with_svst1b_scatter_u64base_u64() {
     let loaded = svld1ub_gather_u64base_u64(svptrue_b8(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -3823,13 +3703,13 @@ unsafe fn test_svld1uh_gather_u64base_u64_with_svst1h_scatter_u64base_u64() {
     let loaded = svld1uh_gather_u64base_u64(svptrue_b16(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -3841,13 +3721,13 @@ unsafe fn test_svld1uw_gather_u64base_u64_with_svst1w_scatter_u64base_u64() {
     let loaded = svld1uw_gather_u64base_u64(svptrue_b32(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_s16_with_svst1b_s16() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s16((0usize) as i16, 1usize.try_into().unwrap());
     svst1b_s16(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -3856,13 +3736,13 @@ unsafe fn test_svld1ub_s16_with_svst1b_s16() {
     let loaded = svld1ub_s16(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_s32_with_svst1b_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svst1b_s32(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -3871,13 +3751,13 @@ unsafe fn test_svld1ub_s32_with_svst1b_s32() {
     let loaded = svld1ub_s32(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_s32_with_svst1h_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svst1h_s32(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -3886,13 +3766,13 @@ unsafe fn test_svld1uh_s32_with_svst1h_s32() {
     let loaded = svld1uh_s32(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_s64_with_svst1b_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1b_s64(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -3901,13 +3781,13 @@ unsafe fn test_svld1ub_s64_with_svst1b_s64() {
     let loaded = svld1ub_s64(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_s64_with_svst1h_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1h_s64(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -3916,13 +3796,13 @@ unsafe fn test_svld1uh_s64_with_svst1h_s64() {
     let loaded = svld1uh_s64(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_s64_with_svst1w_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svst1w_s64(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -3931,13 +3811,13 @@ unsafe fn test_svld1uw_s64_with_svst1w_s64() {
     let loaded = svld1uw_s64(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_u16_with_svst1b_u16() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u16((0usize) as u16, 1usize.try_into().unwrap());
     svst1b_u16(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -3946,13 +3826,13 @@ unsafe fn test_svld1ub_u16_with_svst1b_u16() {
     let loaded = svld1ub_u16(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_u32_with_svst1b_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svst1b_u32(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -3961,13 +3841,13 @@ unsafe fn test_svld1ub_u32_with_svst1b_u32() {
     let loaded = svld1ub_u32(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_u32_with_svst1h_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svst1h_u32(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -3976,13 +3856,13 @@ unsafe fn test_svld1uh_u32_with_svst1h_u32() {
     let loaded = svld1uh_u32(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_u64_with_svst1b_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1b_u64(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -3991,13 +3871,13 @@ unsafe fn test_svld1ub_u64_with_svst1b_u64() {
     let loaded = svld1ub_u64(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_u64_with_svst1h_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1h_u64(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -4006,13 +3886,13 @@ unsafe fn test_svld1uh_u64_with_svst1h_u64() {
     let loaded = svld1uh_u64(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_u64_with_svst1w_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svst1w_u64(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -4021,17 +3901,14 @@ unsafe fn test_svld1uw_u64_with_svst1w_u64() {
     let loaded = svld1uw_u64(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap());
     svst1b_vnum_s16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -4040,20 +3917,14 @@ unsafe fn test_svld1ub_vnum_s16_with_svst1b_vnum_s16() {
     let loaded = svld1ub_vnum_s16(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svst1b_vnum_s32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -4062,20 +3933,14 @@ unsafe fn test_svld1ub_vnum_s32_with_svst1b_vnum_s32() {
     let loaded = svld1ub_vnum_s32(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svst1h_vnum_s32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -4084,20 +3949,14 @@ unsafe fn test_svld1uh_vnum_s32_with_svst1h_vnum_s32() {
     let loaded = svld1uh_vnum_s32(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1b_vnum_s64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -4106,20 +3965,14 @@ unsafe fn test_svld1ub_vnum_s64_with_svst1b_vnum_s64() {
     let loaded = svld1ub_vnum_s64(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1h_vnum_s64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -4128,20 +3981,14 @@ unsafe fn test_svld1uh_vnum_s64_with_svst1h_vnum_s64() {
     let loaded = svld1uh_vnum_s64(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svst1w_vnum_s64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -4150,20 +3997,14 @@ unsafe fn test_svld1uw_vnum_s64_with_svst1w_vnum_s64() {
     let loaded = svld1uw_vnum_s64(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap());
     svst1b_vnum_u16(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -4172,20 +4013,14 @@ unsafe fn test_svld1ub_vnum_u16_with_svst1b_vnum_u16() {
     let loaded = svld1ub_vnum_u16(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_u32_with_svst1b_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svst1b_vnum_u32(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -4194,20 +4029,14 @@ unsafe fn test_svld1ub_vnum_u32_with_svst1b_vnum_u32() {
     let loaded = svld1ub_vnum_u32(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svst1h_vnum_u32(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -4216,20 +4045,14 @@ unsafe fn test_svld1uh_vnum_u32_with_svst1h_vnum_u32() {
     let loaded = svld1uh_vnum_u32(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1b_vnum_u64(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -4238,20 +4061,14 @@ unsafe fn test_svld1ub_vnum_u64_with_svst1b_vnum_u64() {
     let loaded = svld1ub_vnum_u64(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_vnum_u64_with_svst1h_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1h_vnum_u64(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -4260,20 +4077,14 @@ unsafe fn test_svld1uh_vnum_u64_with_svst1h_vnum_u64() {
     let loaded = svld1uh_vnum_u64(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svst1w_vnum_u64(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -4282,16 +4093,13 @@ unsafe fn test_svld1uw_vnum_u64_with_svst1w_vnum_u64() {
     let loaded = svld1uw_vnum_u64(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1h_scatter_s32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4302,13 +4110,13 @@ unsafe fn test_svld1uh_gather_s32index_s32_with_svst1h_scatter_s32index_s32() {
         svld1uh_gather_s32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_s32(0, 1);
     svst1h_scatter_s32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4319,13 +4127,13 @@ unsafe fn test_svld1uh_gather_s32index_u32_with_svst1h_scatter_s32index_u32() {
         svld1uh_gather_s32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4336,13 +4144,13 @@ unsafe fn test_svld1uh_gather_s64index_s64_with_svst1h_scatter_s64index_s64() {
         svld1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4353,13 +4161,13 @@ unsafe fn test_svld1uw_gather_s64index_s64_with_svst1w_scatter_s64index_s64() {
         svld1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4370,13 +4178,13 @@ unsafe fn test_svld1uh_gather_s64index_u64_with_svst1h_scatter_s64index_u64() {
         svld1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svst1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4387,13 +4195,13 @@ unsafe fn test_svld1uw_gather_s64index_u64_with_svst1w_scatter_s64index_u64() {
         svld1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1h_scatter_u32index_s32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4404,13 +4212,13 @@ unsafe fn test_svld1uh_gather_u32index_s32_with_svst1h_scatter_u32index_s32() {
         svld1uh_gather_u32index_s32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let indices = svindex_u32(0, 1);
     svst1h_scatter_u32index_u32(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4421,13 +4229,13 @@ unsafe fn test_svld1uh_gather_u32index_u32_with_svst1h_scatter_u32index_u32() {
         svld1uh_gather_u32index_u32(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4438,13 +4246,13 @@ unsafe fn test_svld1uh_gather_u64index_s64_with_svst1h_scatter_u64index_s64() {
         svld1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4455,13 +4263,13 @@ unsafe fn test_svld1uw_gather_u64index_s64_with_svst1w_scatter_u64index_s64() {
         svld1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4472,13 +4280,13 @@ unsafe fn test_svld1uh_gather_u64index_u64_with_svst1h_scatter_u64index_u64() {
         svld1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svst1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4489,13 +4297,13 @@ unsafe fn test_svld1uw_gather_u64index_u64_with_svst1w_scatter_u64index_u64() {
         svld1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_index_s32(
         svptrue_b16(),
@@ -4514,13 +4322,13 @@ unsafe fn test_svld1uh_gather_u32base_index_s32_with_svst1h_scatter_u32base_inde
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_index_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svst1h_scatter_u32base_index_u32(
         svptrue_b16(),
@@ -4539,13 +4347,13 @@ unsafe fn test_svld1uh_gather_u32base_index_u32_with_svst1h_scatter_u32base_inde
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -4557,13 +4365,13 @@ unsafe fn test_svld1uh_gather_u64base_index_s64_with_svst1h_scatter_u64base_inde
     let loaded = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -4575,13 +4383,13 @@ unsafe fn test_svld1uw_gather_u64base_index_s64_with_svst1w_scatter_u64base_inde
     let loaded = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_index_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -4593,13 +4401,13 @@ unsafe fn test_svld1uh_gather_u64base_index_u64_with_svst1h_scatter_u64base_inde
     let loaded = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_index_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -4611,7 +4419,7 @@ unsafe fn test_svld1uw_gather_u64base_index_u64_with_svst1w_scatter_u64base_inde
     let loaded = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -4686,8 +4494,8 @@ unsafe fn test_svld2_f64_with_svst2_f64() {
 unsafe fn test_svld2_s8_with_svst2_s8() {
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate2_s8(
-        svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 2usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 2usize.try_into().unwrap()),
     );
     svst2_s8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4697,19 +4505,19 @@ unsafe fn test_svld2_s8_with_svst2_s8() {
     let loaded = svld2_s8(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i8(
         svget2_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget2_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_s16_with_svst2_s16() {
     let mut storage = [0 as i16; 640usize];
     let data = svcreate2_s16(
-        svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 2usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 2usize.try_into().unwrap()),
     );
     svst2_s16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4719,19 +4527,19 @@ unsafe fn test_svld2_s16_with_svst2_s16() {
     let loaded = svld2_s16(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i16(
         svget2_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget2_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_s32_with_svst2_s32() {
     let mut storage = [0 as i32; 320usize];
     let data = svcreate2_s32(
-        svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 2usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 2usize.try_into().unwrap()),
     );
     svst2_s32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4741,19 +4549,19 @@ unsafe fn test_svld2_s32_with_svst2_s32() {
     let loaded = svld2_s32(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i32(
         svget2_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget2_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_s64_with_svst2_s64() {
     let mut storage = [0 as i64; 160usize];
     let data = svcreate2_s64(
-        svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 2usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 2usize.try_into().unwrap()),
     );
     svst2_s64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4763,19 +4571,19 @@ unsafe fn test_svld2_s64_with_svst2_s64() {
     let loaded = svld2_s64(svptrue_b64(), storage.as_ptr() as *const i64);
     assert_vector_matches_i64(
         svget2_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget2_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_u8_with_svst2_u8() {
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate2_u8(
-        svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 2usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 2usize.try_into().unwrap()),
     );
     svst2_u8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4785,19 +4593,19 @@ unsafe fn test_svld2_u8_with_svst2_u8() {
     let loaded = svld2_u8(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u8(
         svget2_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget2_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_u16_with_svst2_u16() {
     let mut storage = [0 as u16; 640usize];
     let data = svcreate2_u16(
-        svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 2usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 2usize.try_into().unwrap()),
     );
     svst2_u16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4807,19 +4615,19 @@ unsafe fn test_svld2_u16_with_svst2_u16() {
     let loaded = svld2_u16(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u16(
         svget2_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget2_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_u32_with_svst2_u32() {
     let mut storage = [0 as u32; 320usize];
     let data = svcreate2_u32(
-        svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 2usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 2usize.try_into().unwrap()),
     );
     svst2_u32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4829,19 +4637,19 @@ unsafe fn test_svld2_u32_with_svst2_u32() {
     let loaded = svld2_u32(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u32(
         svget2_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget2_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld2_u64_with_svst2_u64() {
     let mut storage = [0 as u64; 160usize];
     let data = svcreate2_u64(
-        svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
-        svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 2usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 2usize.try_into().unwrap()),
     );
     svst2_u64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4851,11 +4659,11 @@ unsafe fn test_svld2_u64_with_svst2_u64() {
     let loaded = svld2_u64(svptrue_b64(), storage.as_ptr() as *const u64);
     assert_vector_matches_u64(
         svget2_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64((0usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget2_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64((1usize).try_into().unwrap(), 2usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -4957,14 +4765,8 @@ unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() {
     let len = svcntb() as usize;
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate2_s8(
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 2usize.try_into().unwrap()),
+        svindex_s8((len + 1usize) as i8, 2usize.try_into().unwrap()),
     );
     svst2_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -4974,17 +4776,11 @@ unsafe fn test_svld2_vnum_s8_with_svst2_vnum_s8() {
     let loaded = svld2_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i8(
         svget2_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget2_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 1usize) as i8, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -4992,14 +4788,8 @@ unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i16; 640usize];
     let data = svcreate2_s16(
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 2usize.try_into().unwrap()),
+        svindex_s16((len + 1usize) as i16, 2usize.try_into().unwrap()),
     );
     svst2_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5009,17 +4799,11 @@ unsafe fn test_svld2_vnum_s16_with_svst2_vnum_s16() {
     let loaded = svld2_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i16(
         svget2_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget2_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 1usize) as i16, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5027,14 +4811,8 @@ unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i32; 320usize];
     let data = svcreate2_s32(
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 2usize.try_into().unwrap()),
+        svindex_s32((len + 1usize) as i32, 2usize.try_into().unwrap()),
     );
     svst2_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5044,17 +4822,11 @@ unsafe fn test_svld2_vnum_s32_with_svst2_vnum_s32() {
     let loaded = svld2_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i32(
         svget2_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget2_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 1usize) as i32, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5062,14 +4834,8 @@ unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i64; 160usize];
     let data = svcreate2_s64(
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 2usize.try_into().unwrap()),
+        svindex_s64((len + 1usize) as i64, 2usize.try_into().unwrap()),
     );
     svst2_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5079,17 +4845,11 @@ unsafe fn test_svld2_vnum_s64_with_svst2_vnum_s64() {
     let loaded = svld2_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
     assert_vector_matches_i64(
         svget2_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget2_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 1usize) as i64, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5097,14 +4857,8 @@ unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() {
     let len = svcntb() as usize;
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate2_u8(
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 2usize.try_into().unwrap()),
+        svindex_u8((len + 1usize) as u8, 2usize.try_into().unwrap()),
     );
     svst2_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5114,17 +4868,11 @@ unsafe fn test_svld2_vnum_u8_with_svst2_vnum_u8() {
     let loaded = svld2_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u8(
         svget2_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget2_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 1usize) as u8, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5132,14 +4880,8 @@ unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u16; 640usize];
     let data = svcreate2_u16(
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 2usize.try_into().unwrap()),
+        svindex_u16((len + 1usize) as u16, 2usize.try_into().unwrap()),
     );
     svst2_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5149,17 +4891,11 @@ unsafe fn test_svld2_vnum_u16_with_svst2_vnum_u16() {
     let loaded = svld2_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u16(
         svget2_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget2_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 1usize) as u16, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5167,14 +4903,8 @@ unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u32; 320usize];
     let data = svcreate2_u32(
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 2usize.try_into().unwrap()),
+        svindex_u32((len + 1usize) as u32, 2usize.try_into().unwrap()),
     );
     svst2_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5184,17 +4914,11 @@ unsafe fn test_svld2_vnum_u32_with_svst2_vnum_u32() {
     let loaded = svld2_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u32(
         svget2_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget2_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 1usize) as u32, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5202,14 +4926,8 @@ unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u64; 160usize];
     let data = svcreate2_u64(
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 2usize.try_into().unwrap()),
+        svindex_u64((len + 1usize) as u64, 2usize.try_into().unwrap()),
     );
     svst2_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5219,17 +4937,11 @@ unsafe fn test_svld2_vnum_u64_with_svst2_vnum_u64() {
     let loaded = svld2_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
     assert_vector_matches_u64(
         svget2_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 2usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget2_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            2usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 1usize) as u64, 2usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5326,9 +5038,9 @@ unsafe fn test_svld3_f64_with_svst3_f64() {
 unsafe fn test_svld3_s8_with_svst3_s8() {
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate3_s8(
-        svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 3usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 3usize.try_into().unwrap()),
+        svindex_s8((2usize) as i8, 3usize.try_into().unwrap()),
     );
     svst3_s8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5338,24 +5050,24 @@ unsafe fn test_svld3_s8_with_svst3_s8() {
     let loaded = svld3_s8(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i8(
         svget3_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget3_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget3_s8::<{ 2usize as i32 }>(loaded),
-        svindex_s8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s8((2usize) as i8, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_s16_with_svst3_s16() {
     let mut storage = [0 as i16; 640usize];
     let data = svcreate3_s16(
-        svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 3usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 3usize.try_into().unwrap()),
+        svindex_s16((2usize) as i16, 3usize.try_into().unwrap()),
     );
     svst3_s16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5365,24 +5077,24 @@ unsafe fn test_svld3_s16_with_svst3_s16() {
     let loaded = svld3_s16(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i16(
         svget3_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget3_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget3_s16::<{ 2usize as i32 }>(loaded),
-        svindex_s16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s16((2usize) as i16, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_s32_with_svst3_s32() {
     let mut storage = [0 as i32; 320usize];
     let data = svcreate3_s32(
-        svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 3usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 3usize.try_into().unwrap()),
+        svindex_s32((2usize) as i32, 3usize.try_into().unwrap()),
     );
     svst3_s32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5392,24 +5104,24 @@ unsafe fn test_svld3_s32_with_svst3_s32() {
     let loaded = svld3_s32(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i32(
         svget3_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget3_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget3_s32::<{ 2usize as i32 }>(loaded),
-        svindex_s32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s32((2usize) as i32, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_s64_with_svst3_s64() {
     let mut storage = [0 as i64; 160usize];
     let data = svcreate3_s64(
-        svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 3usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 3usize.try_into().unwrap()),
+        svindex_s64((2usize) as i64, 3usize.try_into().unwrap()),
     );
     svst3_s64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5419,24 +5131,24 @@ unsafe fn test_svld3_s64_with_svst3_s64() {
     let loaded = svld3_s64(svptrue_b64(), storage.as_ptr() as *const i64);
     assert_vector_matches_i64(
         svget3_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget3_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget3_s64::<{ 2usize as i32 }>(loaded),
-        svindex_s64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_s64((2usize) as i64, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_u8_with_svst3_u8() {
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate3_u8(
-        svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 3usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 3usize.try_into().unwrap()),
+        svindex_u8((2usize) as u8, 3usize.try_into().unwrap()),
     );
     svst3_u8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5446,24 +5158,24 @@ unsafe fn test_svld3_u8_with_svst3_u8() {
     let loaded = svld3_u8(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u8(
         svget3_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget3_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget3_u8::<{ 2usize as i32 }>(loaded),
-        svindex_u8((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u8((2usize) as u8, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_u16_with_svst3_u16() {
     let mut storage = [0 as u16; 640usize];
     let data = svcreate3_u16(
-        svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 3usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 3usize.try_into().unwrap()),
+        svindex_u16((2usize) as u16, 3usize.try_into().unwrap()),
     );
     svst3_u16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5473,24 +5185,24 @@ unsafe fn test_svld3_u16_with_svst3_u16() {
     let loaded = svld3_u16(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u16(
         svget3_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget3_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget3_u16::<{ 2usize as i32 }>(loaded),
-        svindex_u16((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u16((2usize) as u16, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_u32_with_svst3_u32() {
     let mut storage = [0 as u32; 320usize];
     let data = svcreate3_u32(
-        svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 3usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 3usize.try_into().unwrap()),
+        svindex_u32((2usize) as u32, 3usize.try_into().unwrap()),
     );
     svst3_u32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5500,24 +5212,24 @@ unsafe fn test_svld3_u32_with_svst3_u32() {
     let loaded = svld3_u32(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u32(
         svget3_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget3_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget3_u32::<{ 2usize as i32 }>(loaded),
-        svindex_u32((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u32((2usize) as u32, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld3_u64_with_svst3_u64() {
     let mut storage = [0 as u64; 160usize];
     let data = svcreate3_u64(
-        svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
-        svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 3usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 3usize.try_into().unwrap()),
+        svindex_u64((2usize) as u64, 3usize.try_into().unwrap()),
     );
     svst3_u64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5527,15 +5239,15 @@ unsafe fn test_svld3_u64_with_svst3_u64() {
     let loaded = svld3_u64(svptrue_b64(), storage.as_ptr() as *const u64);
     assert_vector_matches_u64(
         svget3_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64((0usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget3_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64((1usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget3_u64::<{ 2usize as i32 }>(loaded),
-        svindex_u64((2usize).try_into().unwrap(), 3usize.try_into().unwrap()),
+        svindex_u64((2usize) as u64, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5671,18 +5383,9 @@ unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() {
     let len = svcntb() as usize;
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate3_s8(
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 3usize.try_into().unwrap()),
+        svindex_s8((len + 1usize) as i8, 3usize.try_into().unwrap()),
+        svindex_s8((len + 2usize) as i8, 3usize.try_into().unwrap()),
     );
     svst3_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5692,24 +5395,15 @@ unsafe fn test_svld3_vnum_s8_with_svst3_vnum_s8() {
     let loaded = svld3_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i8(
         svget3_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget3_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 1usize) as i8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget3_s8::<{ 2usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 2usize) as i8, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5717,18 +5411,9 @@ unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i16; 640usize];
     let data = svcreate3_s16(
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 3usize.try_into().unwrap()),
+        svindex_s16((len + 1usize) as i16, 3usize.try_into().unwrap()),
+        svindex_s16((len + 2usize) as i16, 3usize.try_into().unwrap()),
     );
     svst3_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5738,24 +5423,15 @@ unsafe fn test_svld3_vnum_s16_with_svst3_vnum_s16() {
     let loaded = svld3_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i16(
         svget3_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget3_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 1usize) as i16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget3_s16::<{ 2usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 2usize) as i16, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5763,18 +5439,9 @@ unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i32; 320usize];
     let data = svcreate3_s32(
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 3usize.try_into().unwrap()),
+        svindex_s32((len + 1usize) as i32, 3usize.try_into().unwrap()),
+        svindex_s32((len + 2usize) as i32, 3usize.try_into().unwrap()),
     );
     svst3_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5784,24 +5451,15 @@ unsafe fn test_svld3_vnum_s32_with_svst3_vnum_s32() {
     let loaded = svld3_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i32(
         svget3_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget3_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 1usize) as i32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget3_s32::<{ 2usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 2usize) as i32, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5809,18 +5467,9 @@ unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i64; 160usize];
     let data = svcreate3_s64(
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 3usize.try_into().unwrap()),
+        svindex_s64((len + 1usize) as i64, 3usize.try_into().unwrap()),
+        svindex_s64((len + 2usize) as i64, 3usize.try_into().unwrap()),
     );
     svst3_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5830,24 +5479,15 @@ unsafe fn test_svld3_vnum_s64_with_svst3_vnum_s64() {
     let loaded = svld3_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
     assert_vector_matches_i64(
         svget3_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget3_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 1usize) as i64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget3_s64::<{ 2usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 2usize) as i64, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5855,18 +5495,9 @@ unsafe fn test_svld3_vnum_u8_with_svst3_vnum_u8() {
     let len = svcntb() as usize;
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate3_u8(
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 3usize.try_into().unwrap()),
+        svindex_u8((len + 1usize) as u8, 3usize.try_into().unwrap()),
+        svindex_u8((len + 2usize) as u8, 3usize.try_into().unwrap()),
     );
     svst3_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5876,24 +5507,15 @@ unsafe fn test_svld3_vnum_u8_with_svst3_vnum_u8() {
     let loaded = svld3_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u8(
         svget3_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget3_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 1usize) as u8, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget3_u8::<{ 2usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 2usize) as u8, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5901,18 +5523,9 @@ unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u16; 640usize];
     let data = svcreate3_u16(
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 3usize.try_into().unwrap()),
+        svindex_u16((len + 1usize) as u16, 3usize.try_into().unwrap()),
+        svindex_u16((len + 2usize) as u16, 3usize.try_into().unwrap()),
     );
     svst3_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5922,24 +5535,15 @@ unsafe fn test_svld3_vnum_u16_with_svst3_vnum_u16() {
     let loaded = svld3_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u16(
         svget3_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget3_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 1usize) as u16, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget3_u16::<{ 2usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 2usize) as u16, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5947,18 +5551,9 @@ unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u32; 320usize];
     let data = svcreate3_u32(
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 3usize.try_into().unwrap()),
+        svindex_u32((len + 1usize) as u32, 3usize.try_into().unwrap()),
+        svindex_u32((len + 2usize) as u32, 3usize.try_into().unwrap()),
     );
     svst3_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -5968,24 +5563,15 @@ unsafe fn test_svld3_vnum_u32_with_svst3_vnum_u32() {
     let loaded = svld3_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u32(
         svget3_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget3_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 1usize) as u32, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget3_u32::<{ 2usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 2usize) as u32, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -5993,18 +5579,9 @@ unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u64; 160usize];
     let data = svcreate3_u64(
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 3usize.try_into().unwrap()),
+        svindex_u64((len + 1usize) as u64, 3usize.try_into().unwrap()),
+        svindex_u64((len + 2usize) as u64, 3usize.try_into().unwrap()),
     );
     svst3_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6014,24 +5591,15 @@ unsafe fn test_svld3_vnum_u64_with_svst3_vnum_u64() {
     let loaded = svld3_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
     assert_vector_matches_u64(
         svget3_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget3_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 1usize) as u64, 3usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget3_u64::<{ 2usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 2usize).try_into().unwrap(),
-            3usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 2usize) as u64, 3usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6150,10 +5718,10 @@ unsafe fn test_svld4_f64_with_svst4_f64() {
 unsafe fn test_svld4_s8_with_svst4_s8() {
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate4_s8(
-        svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((2usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((3usize) as i8, 4usize.try_into().unwrap()),
     );
     svst4_s8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6163,29 +5731,29 @@ unsafe fn test_svld4_s8_with_svst4_s8() {
     let loaded = svld4_s8(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i8(
         svget4_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((1usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 2usize as i32 }>(loaded),
-        svindex_s8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((2usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 3usize as i32 }>(loaded),
-        svindex_s8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s8((3usize) as i8, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_s16_with_svst4_s16() {
     let mut storage = [0 as i16; 640usize];
     let data = svcreate4_s16(
-        svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((2usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((3usize) as i16, 4usize.try_into().unwrap()),
     );
     svst4_s16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6195,29 +5763,29 @@ unsafe fn test_svld4_s16_with_svst4_s16() {
     let loaded = svld4_s16(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i16(
         svget4_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((1usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 2usize as i32 }>(loaded),
-        svindex_s16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((2usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 3usize as i32 }>(loaded),
-        svindex_s16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s16((3usize) as i16, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_s32_with_svst4_s32() {
     let mut storage = [0 as i32; 320usize];
     let data = svcreate4_s32(
-        svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((2usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((3usize) as i32, 4usize.try_into().unwrap()),
     );
     svst4_s32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6227,29 +5795,29 @@ unsafe fn test_svld4_s32_with_svst4_s32() {
     let loaded = svld4_s32(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i32(
         svget4_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 2usize as i32 }>(loaded),
-        svindex_s32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((2usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 3usize as i32 }>(loaded),
-        svindex_s32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s32((3usize) as i32, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_s64_with_svst4_s64() {
     let mut storage = [0 as i64; 160usize];
     let data = svcreate4_s64(
-        svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((2usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((3usize) as i64, 4usize.try_into().unwrap()),
     );
     svst4_s64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6259,29 +5827,29 @@ unsafe fn test_svld4_s64_with_svst4_s64() {
     let loaded = svld4_s64(svptrue_b64(), storage.as_ptr() as *const i64);
     assert_vector_matches_i64(
         svget4_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 2usize as i32 }>(loaded),
-        svindex_s64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((2usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 3usize as i32 }>(loaded),
-        svindex_s64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_s64((3usize) as i64, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_u8_with_svst4_u8() {
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate4_u8(
-        svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((2usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((3usize) as u8, 4usize.try_into().unwrap()),
     );
     svst4_u8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6291,29 +5859,29 @@ unsafe fn test_svld4_u8_with_svst4_u8() {
     let loaded = svld4_u8(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u8(
         svget4_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((1usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 2usize as i32 }>(loaded),
-        svindex_u8((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((2usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 3usize as i32 }>(loaded),
-        svindex_u8((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u8((3usize) as u8, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_u16_with_svst4_u16() {
     let mut storage = [0 as u16; 640usize];
     let data = svcreate4_u16(
-        svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((2usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((3usize) as u16, 4usize.try_into().unwrap()),
     );
     svst4_u16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6323,29 +5891,29 @@ unsafe fn test_svld4_u16_with_svst4_u16() {
     let loaded = svld4_u16(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u16(
         svget4_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((1usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 2usize as i32 }>(loaded),
-        svindex_u16((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((2usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 3usize as i32 }>(loaded),
-        svindex_u16((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u16((3usize) as u16, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_u32_with_svst4_u32() {
     let mut storage = [0 as u32; 320usize];
     let data = svcreate4_u32(
-        svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((2usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((3usize) as u32, 4usize.try_into().unwrap()),
     );
     svst4_u32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6355,29 +5923,29 @@ unsafe fn test_svld4_u32_with_svst4_u32() {
     let loaded = svld4_u32(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u32(
         svget4_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 2usize as i32 }>(loaded),
-        svindex_u32((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((2usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 3usize as i32 }>(loaded),
-        svindex_u32((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u32((3usize) as u32, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svld4_u64_with_svst4_u64() {
     let mut storage = [0 as u64; 160usize];
     let data = svcreate4_u64(
-        svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
-        svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((2usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((3usize) as u64, 4usize.try_into().unwrap()),
     );
     svst4_u64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6387,19 +5955,19 @@ unsafe fn test_svld4_u64_with_svst4_u64() {
     let loaded = svld4_u64(svptrue_b64(), storage.as_ptr() as *const u64);
     assert_vector_matches_u64(
         svget4_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64((0usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64((1usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 2usize as i32 }>(loaded),
-        svindex_u64((2usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((2usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 3usize as i32 }>(loaded),
-        svindex_u64((3usize).try_into().unwrap(), 4usize.try_into().unwrap()),
+        svindex_u64((3usize) as u64, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6569,22 +6137,10 @@ unsafe fn test_svld4_vnum_s8_with_svst4_vnum_s8() {
     let len = svcntb() as usize;
     let mut storage = [0 as i8; 1280usize];
     let data = svcreate4_s8(
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s8(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((len + 1usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((len + 2usize) as i8, 4usize.try_into().unwrap()),
+        svindex_s8((len + 3usize) as i8, 4usize.try_into().unwrap()),
     );
     svst4_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6594,31 +6150,19 @@ unsafe fn test_svld4_vnum_s8_with_svst4_vnum_s8() {
     let loaded = svld4_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i8(
         svget4_s8::<{ 0usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 1usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 1usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 2usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 2usize) as i8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i8(
         svget4_s8::<{ 3usize as i32 }>(loaded),
-        svindex_s8(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 3usize) as i8, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6626,22 +6170,10 @@ unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i16; 640usize];
     let data = svcreate4_s16(
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s16(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((len + 1usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((len + 2usize) as i16, 4usize.try_into().unwrap()),
+        svindex_s16((len + 3usize) as i16, 4usize.try_into().unwrap()),
     );
     svst4_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6651,31 +6183,19 @@ unsafe fn test_svld4_vnum_s16_with_svst4_vnum_s16() {
     let loaded = svld4_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i16(
         svget4_s16::<{ 0usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 1usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 1usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 2usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 2usize) as i16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i16(
         svget4_s16::<{ 3usize as i32 }>(loaded),
-        svindex_s16(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 3usize) as i16, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6683,22 +6203,10 @@ unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i32; 320usize];
     let data = svcreate4_s32(
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s32(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((len + 1usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((len + 2usize) as i32, 4usize.try_into().unwrap()),
+        svindex_s32((len + 3usize) as i32, 4usize.try_into().unwrap()),
     );
     svst4_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6708,31 +6216,19 @@ unsafe fn test_svld4_vnum_s32_with_svst4_vnum_s32() {
     let loaded = svld4_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i32(
         svget4_s32::<{ 0usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 1usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 1usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 2usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 2usize) as i32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i32(
         svget4_s32::<{ 3usize as i32 }>(loaded),
-        svindex_s32(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 3usize) as i32, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6740,22 +6236,10 @@ unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i64; 160usize];
     let data = svcreate4_s64(
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_s64(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((len + 1usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((len + 2usize) as i64, 4usize.try_into().unwrap()),
+        svindex_s64((len + 3usize) as i64, 4usize.try_into().unwrap()),
     );
     svst4_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6765,31 +6249,19 @@ unsafe fn test_svld4_vnum_s64_with_svst4_vnum_s64() {
     let loaded = svld4_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
     assert_vector_matches_i64(
         svget4_s64::<{ 0usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 1usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 1usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 2usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 2usize) as i64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_i64(
         svget4_s64::<{ 3usize as i32 }>(loaded),
-        svindex_s64(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 3usize) as i64, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6797,22 +6269,10 @@ unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() {
     let len = svcntb() as usize;
     let mut storage = [0 as u8; 1280usize];
     let data = svcreate4_u8(
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u8(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((len + 1usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((len + 2usize) as u8, 4usize.try_into().unwrap()),
+        svindex_u8((len + 3usize) as u8, 4usize.try_into().unwrap()),
     );
     svst4_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6822,31 +6282,19 @@ unsafe fn test_svld4_vnum_u8_with_svst4_vnum_u8() {
     let loaded = svld4_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u8(
         svget4_u8::<{ 0usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 1usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 1usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 2usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 2usize) as u8, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u8(
         svget4_u8::<{ 3usize as i32 }>(loaded),
-        svindex_u8(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 3usize) as u8, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6854,22 +6302,10 @@ unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u16; 640usize];
     let data = svcreate4_u16(
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u16(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((len + 1usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((len + 2usize) as u16, 4usize.try_into().unwrap()),
+        svindex_u16((len + 3usize) as u16, 4usize.try_into().unwrap()),
     );
     svst4_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6879,31 +6315,19 @@ unsafe fn test_svld4_vnum_u16_with_svst4_vnum_u16() {
     let loaded = svld4_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u16(
         svget4_u16::<{ 0usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 1usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 1usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 2usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 2usize) as u16, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u16(
         svget4_u16::<{ 3usize as i32 }>(loaded),
-        svindex_u16(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 3usize) as u16, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6911,22 +6335,10 @@ unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u32; 320usize];
     let data = svcreate4_u32(
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u32(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((len + 1usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((len + 2usize) as u32, 4usize.try_into().unwrap()),
+        svindex_u32((len + 3usize) as u32, 4usize.try_into().unwrap()),
     );
     svst4_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6936,31 +6348,19 @@ unsafe fn test_svld4_vnum_u32_with_svst4_vnum_u32() {
     let loaded = svld4_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u32(
         svget4_u32::<{ 0usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 1usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 1usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 2usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 2usize) as u32, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u32(
         svget4_u32::<{ 3usize as i32 }>(loaded),
-        svindex_u32(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 3usize) as u32, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -6968,22 +6368,10 @@ unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u64; 160usize];
     let data = svcreate4_u64(
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
-        svindex_u64(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((len + 1usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((len + 2usize) as u64, 4usize.try_into().unwrap()),
+        svindex_u64((len + 3usize) as u64, 4usize.try_into().unwrap()),
     );
     svst4_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -6993,31 +6381,19 @@ unsafe fn test_svld4_vnum_u64_with_svst4_vnum_u64() {
     let loaded = svld4_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
     assert_vector_matches_u64(
         svget4_u64::<{ 0usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 1usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 1usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 1usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 2usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 2usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 2usize) as u64, 4usize.try_into().unwrap()),
     );
     assert_vector_matches_u64(
         svget4_u64::<{ 3usize as i32 }>(loaded),
-        svindex_u64(
-            (len + 3usize).try_into().unwrap(),
-            4usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 3usize) as u64, 4usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7053,7 +6429,7 @@ unsafe fn test_svldff1_s8() {
     let loaded = svldff1_s8(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i8(
         loaded,
-        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7063,7 +6439,7 @@ unsafe fn test_svldff1_s16() {
     let loaded = svldff1_s16(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7073,7 +6449,7 @@ unsafe fn test_svldff1_s32() {
     let loaded = svldff1_s32(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7083,7 +6459,7 @@ unsafe fn test_svldff1_s64() {
     let loaded = svldff1_s64(svptrue_b64(), I64_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7093,7 +6469,7 @@ unsafe fn test_svldff1_u8() {
     let loaded = svldff1_u8(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u8(
         loaded,
-        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7103,7 +6479,7 @@ unsafe fn test_svldff1_u16() {
     let loaded = svldff1_u16(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7113,7 +6489,7 @@ unsafe fn test_svldff1_u32() {
     let loaded = svldff1_u32(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7123,7 +6499,7 @@ unsafe fn test_svldff1_u64() {
     let loaded = svldff1_u64(svptrue_b64(), U64_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7168,10 +6544,7 @@ unsafe fn test_svldff1_vnum_s8() {
     let len = svcntb() as usize;
     assert_vector_matches_i8(
         loaded,
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7182,10 +6555,7 @@ unsafe fn test_svldff1_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7196,10 +6566,7 @@ unsafe fn test_svldff1_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7210,10 +6577,7 @@ unsafe fn test_svldff1_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7224,10 +6588,7 @@ unsafe fn test_svldff1_vnum_u8() {
     let len = svcntb() as usize;
     assert_vector_matches_u8(
         loaded,
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7238,10 +6599,7 @@ unsafe fn test_svldff1_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7252,10 +6610,7 @@ unsafe fn test_svldff1_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7266,10 +6621,7 @@ unsafe fn test_svldff1_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7279,7 +6631,7 @@ unsafe fn test_svldff1sb_s16() {
     let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7289,7 +6641,7 @@ unsafe fn test_svldff1sb_s32() {
     let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7299,7 +6651,7 @@ unsafe fn test_svldff1sh_s32() {
     let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7309,7 +6661,7 @@ unsafe fn test_svldff1sb_s64() {
     let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7319,7 +6671,7 @@ unsafe fn test_svldff1sh_s64() {
     let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7329,7 +6681,7 @@ unsafe fn test_svldff1sw_s64() {
     let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7339,7 +6691,7 @@ unsafe fn test_svldff1sb_u16() {
     let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7349,7 +6701,7 @@ unsafe fn test_svldff1sb_u32() {
     let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7359,7 +6711,7 @@ unsafe fn test_svldff1sh_u32() {
     let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7369,7 +6721,7 @@ unsafe fn test_svldff1sb_u64() {
     let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7379,7 +6731,7 @@ unsafe fn test_svldff1sh_u64() {
     let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7389,7 +6741,7 @@ unsafe fn test_svldff1sw_u64() {
     let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7400,10 +6752,7 @@ unsafe fn test_svldff1sb_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7414,10 +6763,7 @@ unsafe fn test_svldff1sb_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7428,10 +6774,7 @@ unsafe fn test_svldff1sh_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7442,10 +6785,7 @@ unsafe fn test_svldff1sb_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7456,10 +6796,7 @@ unsafe fn test_svldff1sh_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7470,10 +6807,7 @@ unsafe fn test_svldff1sw_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7484,10 +6818,7 @@ unsafe fn test_svldff1sb_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7498,10 +6829,7 @@ unsafe fn test_svldff1sb_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7512,10 +6840,7 @@ unsafe fn test_svldff1sh_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7526,10 +6851,7 @@ unsafe fn test_svldff1sb_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7540,10 +6862,7 @@ unsafe fn test_svldff1sh_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7554,10 +6873,7 @@ unsafe fn test_svldff1sw_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7567,7 +6883,7 @@ unsafe fn test_svldff1ub_s16() {
     let loaded = svldff1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7577,7 +6893,7 @@ unsafe fn test_svldff1ub_s32() {
     let loaded = svldff1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7587,7 +6903,7 @@ unsafe fn test_svldff1uh_s32() {
     let loaded = svldff1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7597,7 +6913,7 @@ unsafe fn test_svldff1ub_s64() {
     let loaded = svldff1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7607,7 +6923,7 @@ unsafe fn test_svldff1uh_s64() {
     let loaded = svldff1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7617,7 +6933,7 @@ unsafe fn test_svldff1uw_s64() {
     let loaded = svldff1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7627,7 +6943,7 @@ unsafe fn test_svldff1ub_u16() {
     let loaded = svldff1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7637,7 +6953,7 @@ unsafe fn test_svldff1ub_u32() {
     let loaded = svldff1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7647,7 +6963,7 @@ unsafe fn test_svldff1uh_u32() {
     let loaded = svldff1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7657,7 +6973,7 @@ unsafe fn test_svldff1ub_u64() {
     let loaded = svldff1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7667,7 +6983,7 @@ unsafe fn test_svldff1uh_u64() {
     let loaded = svldff1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7677,7 +6993,7 @@ unsafe fn test_svldff1uw_u64() {
     let loaded = svldff1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7688,10 +7004,7 @@ unsafe fn test_svldff1ub_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7702,10 +7015,7 @@ unsafe fn test_svldff1ub_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7716,10 +7026,7 @@ unsafe fn test_svldff1uh_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7730,10 +7037,7 @@ unsafe fn test_svldff1ub_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7744,10 +7048,7 @@ unsafe fn test_svldff1uh_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7758,10 +7059,7 @@ unsafe fn test_svldff1uw_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7772,10 +7070,7 @@ unsafe fn test_svldff1ub_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7786,10 +7081,7 @@ unsafe fn test_svldff1ub_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7800,10 +7092,7 @@ unsafe fn test_svldff1uh_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7814,10 +7103,7 @@ unsafe fn test_svldff1ub_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7828,10 +7114,7 @@ unsafe fn test_svldff1uh_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7842,10 +7125,7 @@ unsafe fn test_svldff1uw_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7881,7 +7161,7 @@ unsafe fn test_svldnf1_s8() {
     let loaded = svldnf1_s8(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i8(
         loaded,
-        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7891,7 +7171,7 @@ unsafe fn test_svldnf1_s16() {
     let loaded = svldnf1_s16(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7901,7 +7181,7 @@ unsafe fn test_svldnf1_s32() {
     let loaded = svldnf1_s32(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7911,7 +7191,7 @@ unsafe fn test_svldnf1_s64() {
     let loaded = svldnf1_s64(svptrue_b64(), I64_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7921,7 +7201,7 @@ unsafe fn test_svldnf1_u8() {
     let loaded = svldnf1_u8(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u8(
         loaded,
-        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7931,7 +7211,7 @@ unsafe fn test_svldnf1_u16() {
     let loaded = svldnf1_u16(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7941,7 +7221,7 @@ unsafe fn test_svldnf1_u32() {
     let loaded = svldnf1_u32(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7951,7 +7231,7 @@ unsafe fn test_svldnf1_u64() {
     let loaded = svldnf1_u64(svptrue_b64(), U64_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -7996,10 +7276,7 @@ unsafe fn test_svldnf1_vnum_s8() {
     let len = svcntb() as usize;
     assert_vector_matches_i8(
         loaded,
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8010,10 +7287,7 @@ unsafe fn test_svldnf1_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8024,10 +7298,7 @@ unsafe fn test_svldnf1_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8038,10 +7309,7 @@ unsafe fn test_svldnf1_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8052,10 +7320,7 @@ unsafe fn test_svldnf1_vnum_u8() {
     let len = svcntb() as usize;
     assert_vector_matches_u8(
         loaded,
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8066,10 +7331,7 @@ unsafe fn test_svldnf1_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8080,10 +7342,7 @@ unsafe fn test_svldnf1_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8094,10 +7353,7 @@ unsafe fn test_svldnf1_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8107,7 +7363,7 @@ unsafe fn test_svldnf1sb_s16() {
     let loaded = svldnf1sb_s16(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8117,7 +7373,7 @@ unsafe fn test_svldnf1sb_s32() {
     let loaded = svldnf1sb_s32(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8127,7 +7383,7 @@ unsafe fn test_svldnf1sh_s32() {
     let loaded = svldnf1sh_s32(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8137,7 +7393,7 @@ unsafe fn test_svldnf1sb_s64() {
     let loaded = svldnf1sb_s64(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8147,7 +7403,7 @@ unsafe fn test_svldnf1sh_s64() {
     let loaded = svldnf1sh_s64(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8157,7 +7413,7 @@ unsafe fn test_svldnf1sw_s64() {
     let loaded = svldnf1sw_s64(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8167,7 +7423,7 @@ unsafe fn test_svldnf1sb_u16() {
     let loaded = svldnf1sb_u16(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8177,7 +7433,7 @@ unsafe fn test_svldnf1sb_u32() {
     let loaded = svldnf1sb_u32(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8187,7 +7443,7 @@ unsafe fn test_svldnf1sh_u32() {
     let loaded = svldnf1sh_u32(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8197,7 +7453,7 @@ unsafe fn test_svldnf1sb_u64() {
     let loaded = svldnf1sb_u64(svptrue_b8(), I8_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8207,7 +7463,7 @@ unsafe fn test_svldnf1sh_u64() {
     let loaded = svldnf1sh_u64(svptrue_b16(), I16_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8217,7 +7473,7 @@ unsafe fn test_svldnf1sw_u64() {
     let loaded = svldnf1sw_u64(svptrue_b32(), I32_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8228,10 +7484,7 @@ unsafe fn test_svldnf1sb_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8242,10 +7495,7 @@ unsafe fn test_svldnf1sb_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8256,10 +7506,7 @@ unsafe fn test_svldnf1sh_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8270,10 +7517,7 @@ unsafe fn test_svldnf1sb_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8284,10 +7528,7 @@ unsafe fn test_svldnf1sh_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8298,10 +7539,7 @@ unsafe fn test_svldnf1sw_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8312,10 +7550,7 @@ unsafe fn test_svldnf1sb_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8326,10 +7561,7 @@ unsafe fn test_svldnf1sb_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8340,10 +7572,7 @@ unsafe fn test_svldnf1sh_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8354,10 +7583,7 @@ unsafe fn test_svldnf1sb_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8368,10 +7594,7 @@ unsafe fn test_svldnf1sh_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8382,10 +7605,7 @@ unsafe fn test_svldnf1sw_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8395,7 +7615,7 @@ unsafe fn test_svldnf1ub_s16() {
     let loaded = svldnf1ub_s16(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8405,7 +7625,7 @@ unsafe fn test_svldnf1ub_s32() {
     let loaded = svldnf1ub_s32(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8415,7 +7635,7 @@ unsafe fn test_svldnf1uh_s32() {
     let loaded = svldnf1uh_s32(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8425,7 +7645,7 @@ unsafe fn test_svldnf1ub_s64() {
     let loaded = svldnf1ub_s64(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8435,7 +7655,7 @@ unsafe fn test_svldnf1uh_s64() {
     let loaded = svldnf1uh_s64(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8445,7 +7665,7 @@ unsafe fn test_svldnf1uw_s64() {
     let loaded = svldnf1uw_s64(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8455,7 +7675,7 @@ unsafe fn test_svldnf1ub_u16() {
     let loaded = svldnf1ub_u16(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8465,7 +7685,7 @@ unsafe fn test_svldnf1ub_u32() {
     let loaded = svldnf1ub_u32(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8475,7 +7695,7 @@ unsafe fn test_svldnf1uh_u32() {
     let loaded = svldnf1uh_u32(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8485,7 +7705,7 @@ unsafe fn test_svldnf1ub_u64() {
     let loaded = svldnf1ub_u64(svptrue_b8(), U8_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8495,7 +7715,7 @@ unsafe fn test_svldnf1uh_u64() {
     let loaded = svldnf1uh_u64(svptrue_b16(), U16_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8505,7 +7725,7 @@ unsafe fn test_svldnf1uw_u64() {
     let loaded = svldnf1uw_u64(svptrue_b32(), U32_DATA.as_ptr());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8516,10 +7736,7 @@ unsafe fn test_svldnf1ub_vnum_s16() {
     let len = svcnth() as usize;
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8530,10 +7747,7 @@ unsafe fn test_svldnf1ub_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8544,10 +7758,7 @@ unsafe fn test_svldnf1uh_vnum_s32() {
     let len = svcntw() as usize;
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8558,10 +7769,7 @@ unsafe fn test_svldnf1ub_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8572,10 +7780,7 @@ unsafe fn test_svldnf1uh_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8586,10 +7791,7 @@ unsafe fn test_svldnf1uw_vnum_s64() {
     let len = svcntd() as usize;
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8600,10 +7802,7 @@ unsafe fn test_svldnf1ub_vnum_u16() {
     let len = svcnth() as usize;
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8614,10 +7813,7 @@ unsafe fn test_svldnf1ub_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8628,10 +7824,7 @@ unsafe fn test_svldnf1uh_vnum_u32() {
     let len = svcntw() as usize;
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8642,10 +7835,7 @@ unsafe fn test_svldnf1ub_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8656,10 +7846,7 @@ unsafe fn test_svldnf1uh_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8670,10 +7857,7 @@ unsafe fn test_svldnf1uw_vnum_u64() {
     let len = svcntd() as usize;
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8721,7 +7905,7 @@ unsafe fn test_svldnt1_f64_with_svstnt1_f64() {
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_s8_with_svstnt1_s8() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s8((0usize) as i8, 1usize.try_into().unwrap());
     svstnt1_s8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -8730,13 +7914,13 @@ unsafe fn test_svldnt1_s8_with_svstnt1_s8() {
     let loaded = svldnt1_s8(svptrue_b8(), storage.as_ptr() as *const i8);
     assert_vector_matches_i8(
         loaded,
-        svindex_s8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s8((0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_s16_with_svstnt1_s16() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s16((0usize) as i16, 1usize.try_into().unwrap());
     svstnt1_s16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -8745,13 +7929,13 @@ unsafe fn test_svldnt1_s16_with_svstnt1_s16() {
     let loaded = svldnt1_s16(svptrue_b16(), storage.as_ptr() as *const i16);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s16((0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_s32_with_svstnt1_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     svstnt1_s32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -8760,13 +7944,13 @@ unsafe fn test_svldnt1_s32_with_svstnt1_s32() {
     let loaded = svldnt1_s32(svptrue_b32(), storage.as_ptr() as *const i32);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_s64_with_svstnt1_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     svstnt1_s64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i64 || val == i as i64);
@@ -8775,13 +7959,13 @@ unsafe fn test_svldnt1_s64_with_svstnt1_s64() {
     let loaded = svldnt1_s64(svptrue_b64(), storage.as_ptr() as *const i64);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_u8_with_svstnt1_u8() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u8((0usize) as u8, 1usize.try_into().unwrap());
     svstnt1_u8(svptrue_b8(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -8790,13 +7974,13 @@ unsafe fn test_svldnt1_u8_with_svstnt1_u8() {
     let loaded = svldnt1_u8(svptrue_b8(), storage.as_ptr() as *const u8);
     assert_vector_matches_u8(
         loaded,
-        svindex_u8((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u8((0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_u16_with_svstnt1_u16() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u16((0usize) as u16, 1usize.try_into().unwrap());
     svstnt1_u16(svptrue_b16(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -8805,13 +7989,13 @@ unsafe fn test_svldnt1_u16_with_svstnt1_u16() {
     let loaded = svldnt1_u16(svptrue_b16(), storage.as_ptr() as *const u16);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u16((0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_u32_with_svstnt1_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     svstnt1_u32(svptrue_b32(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -8820,13 +8004,13 @@ unsafe fn test_svldnt1_u32_with_svstnt1_u32() {
     let loaded = svldnt1_u32(svptrue_b32(), storage.as_ptr() as *const u32);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_u64_with_svstnt1_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     svstnt1_u64(svptrue_b64(), storage.as_mut_ptr(), data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u64 || val == i as u64);
@@ -8835,7 +8019,7 @@ unsafe fn test_svldnt1_u64_with_svstnt1_u64() {
     let loaded = svldnt1_u64(svptrue_b64(), storage.as_ptr() as *const u64);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
@@ -8898,10 +8082,7 @@ unsafe fn test_svldnt1_vnum_f64_with_svstnt1_vnum_f64() {
 unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() {
     let len = svcntb() as usize;
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s8(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap());
     svstnt1_vnum_s8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i8 || val == i as i8);
@@ -8910,20 +8091,14 @@ unsafe fn test_svldnt1_vnum_s8_with_svstnt1_vnum_s8() {
     let loaded = svldnt1_vnum_s8(svptrue_b8(), storage.as_ptr() as *const i8, 1);
     assert_vector_matches_i8(
         loaded,
-        svindex_s8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s8((len + 0usize) as i8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() {
     let len = svcnth() as usize;
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap());
     svstnt1_vnum_s16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i16 || val == i as i16);
@@ -8932,20 +8107,14 @@ unsafe fn test_svldnt1_vnum_s16_with_svstnt1_vnum_s16() {
     let loaded = svldnt1_vnum_s16(svptrue_b16(), storage.as_ptr() as *const i16, 1);
     assert_vector_matches_i16(
         loaded,
-        svindex_s16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s16((len + 0usize) as i16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() {
     let len = svcntw() as usize;
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap());
     svstnt1_vnum_s32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i32 || val == i as i32);
@@ -8954,20 +8123,14 @@ unsafe fn test_svldnt1_vnum_s32_with_svstnt1_vnum_s32() {
     let loaded = svldnt1_vnum_s32(svptrue_b32(), storage.as_ptr() as *const i32, 1);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s32((len + 0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() {
     let len = svcntd() as usize;
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap());
     svstnt1_vnum_s64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as i64 || val == i as i64);
@@ -8976,20 +8139,14 @@ unsafe fn test_svldnt1_vnum_s64_with_svstnt1_vnum_s64() {
     let loaded = svldnt1_vnum_s64(svptrue_b64(), storage.as_ptr() as *const i64, 1);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_s64((len + 0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() {
     let len = svcntb() as usize;
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u8(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap());
     svstnt1_vnum_u8(svptrue_b8(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u8 || val == i as u8);
@@ -8998,20 +8155,14 @@ unsafe fn test_svldnt1_vnum_u8_with_svstnt1_vnum_u8() {
     let loaded = svldnt1_vnum_u8(svptrue_b8(), storage.as_ptr() as *const u8, 1);
     assert_vector_matches_u8(
         loaded,
-        svindex_u8(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u8((len + 0usize) as u8, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() {
     let len = svcnth() as usize;
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u16(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap());
     svstnt1_vnum_u16(svptrue_b16(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u16 || val == i as u16);
@@ -9020,20 +8171,14 @@ unsafe fn test_svldnt1_vnum_u16_with_svstnt1_vnum_u16() {
     let loaded = svldnt1_vnum_u16(svptrue_b16(), storage.as_ptr() as *const u16, 1);
     assert_vector_matches_u16(
         loaded,
-        svindex_u16(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u16((len + 0usize) as u16, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() {
     let len = svcntw() as usize;
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap());
     svstnt1_vnum_u32(svptrue_b32(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u32 || val == i as u32);
@@ -9042,20 +8187,14 @@ unsafe fn test_svldnt1_vnum_u32_with_svstnt1_vnum_u32() {
     let loaded = svldnt1_vnum_u32(svptrue_b32(), storage.as_ptr() as *const u32, 1);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u32((len + 0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
 unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() {
     let len = svcntd() as usize;
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64(
-        (len + 0usize).try_into().unwrap(),
-        1usize.try_into().unwrap(),
-    );
+    let data = svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap());
     svstnt1_vnum_u64(svptrue_b64(), storage.as_mut_ptr(), 1, data);
     for (i, &val) in storage.iter().enumerate() {
         assert!(val == 0 as u64 || val == i as u64);
@@ -9064,10 +8203,7 @@ unsafe fn test_svldnt1_vnum_u64_with_svstnt1_vnum_u64() {
     let loaded = svldnt1_vnum_u64(svptrue_b64(), storage.as_ptr() as *const u64, 1);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64(
-            (len + 0usize).try_into().unwrap(),
-            1usize.try_into().unwrap(),
-        ),
+        svindex_u64((len + 0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve")]
diff --git a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
index dbb63e1bfd..23717ec862 100644
--- a/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
+++ b/crates/core_arch/src/aarch64/sve/ld_st_tests_sve2.rs
@@ -239,7 +239,7 @@ unsafe fn test_svldnt1_gather_s64index_f64_with_svstnt1_scatter_s64index_f64() {
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1_scatter_s64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -250,13 +250,13 @@ unsafe fn test_svldnt1_gather_s64index_s64_with_svstnt1_scatter_s64index_s64() {
         svldnt1_gather_s64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1_scatter_s64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -267,7 +267,7 @@ unsafe fn test_svldnt1_gather_s64index_u64_with_svstnt1_scatter_s64index_u64() {
         svldnt1_gather_s64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -296,7 +296,7 @@ unsafe fn test_svldnt1_gather_u64index_f64_with_svstnt1_scatter_u64index_f64() {
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1_scatter_u64index_s64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -307,13 +307,13 @@ unsafe fn test_svldnt1_gather_u64index_s64_with_svstnt1_scatter_u64index_s64() {
         svldnt1_gather_u64index_s64(svptrue_b64(), storage.as_ptr() as *const i64, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1_scatter_u64index_u64(svptrue_b64(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -324,7 +324,7 @@ unsafe fn test_svldnt1_gather_u64index_u64_with_svstnt1_scatter_u64index_u64() {
         svldnt1_gather_u64index_u64(svptrue_b64(), storage.as_ptr() as *const u64, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -353,7 +353,7 @@ unsafe fn test_svldnt1_gather_s64offset_f64_with_svstnt1_scatter_s64offset_f64()
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 8u32.try_into().unwrap());
     svstnt1_scatter_s64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -364,13 +364,13 @@ unsafe fn test_svldnt1_gather_s64offset_s64_with_svstnt1_scatter_s64offset_s64()
         svldnt1_gather_s64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 8u32.try_into().unwrap());
     svstnt1_scatter_s64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -381,7 +381,7 @@ unsafe fn test_svldnt1_gather_s64offset_u64_with_svstnt1_scatter_s64offset_u64()
         svldnt1_gather_s64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -410,7 +410,7 @@ unsafe fn test_svldnt1_gather_u32offset_f32_with_svstnt1_scatter_u32offset_f32()
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32offset_s32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -421,13 +421,13 @@ unsafe fn test_svldnt1_gather_u32offset_s32_with_svstnt1_scatter_u32offset_s32()
         svldnt1_gather_u32offset_s32(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32offset_u32(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -438,7 +438,7 @@ unsafe fn test_svldnt1_gather_u32offset_u32_with_svstnt1_scatter_u32offset_u32()
         svldnt1_gather_u32offset_u32(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -467,7 +467,7 @@ unsafe fn test_svldnt1_gather_u64offset_f64_with_svstnt1_scatter_u64offset_f64()
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     svstnt1_scatter_u64offset_s64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -478,13 +478,13 @@ unsafe fn test_svldnt1_gather_u64offset_s64_with_svstnt1_scatter_u64offset_s64()
         svldnt1_gather_u64offset_s64(svptrue_b64(), storage.as_ptr() as *const i64, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     svstnt1_scatter_u64offset_u64(svptrue_b64(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -495,7 +495,7 @@ unsafe fn test_svldnt1_gather_u64offset_u64_with_svstnt1_scatter_u64offset_u64()
         svldnt1_gather_u64offset_u64(svptrue_b64(), storage.as_ptr() as *const u64, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -525,7 +525,7 @@ unsafe fn test_svldnt1_gather_u64base_f64_with_svstnt1_scatter_u64base_f64() {
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -537,13 +537,13 @@ unsafe fn test_svldnt1_gather_u64base_s64_with_svstnt1_scatter_u64base_s64() {
     let loaded = svldnt1_gather_u64base_s64(svptrue_b64(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -555,7 +555,7 @@ unsafe fn test_svldnt1_gather_u64base_u64_with_svstnt1_scatter_u64base_u64() {
     let loaded = svldnt1_gather_u64base_u64(svptrue_b64(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -592,7 +592,7 @@ unsafe fn test_svldnt1_gather_u32base_index_f32_with_svstnt1_scatter_u32base_ind
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_index_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32base_index_s32(
         svptrue_b32(),
@@ -611,13 +611,13 @@ unsafe fn test_svldnt1_gather_u32base_index_s32_with_svstnt1_scatter_u32base_ind
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_index_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32base_index_u32(
         svptrue_b32(),
@@ -636,7 +636,7 @@ unsafe fn test_svldnt1_gather_u32base_index_u32_with_svstnt1_scatter_u32base_ind
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -666,7 +666,7 @@ unsafe fn test_svldnt1_gather_u64base_index_f64_with_svstnt1_scatter_u64base_ind
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_index_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -678,13 +678,13 @@ unsafe fn test_svldnt1_gather_u64base_index_s64_with_svstnt1_scatter_u64base_ind
     let loaded = svldnt1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_index_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -696,7 +696,7 @@ unsafe fn test_svldnt1_gather_u64base_index_u64_with_svstnt1_scatter_u64base_ind
     let loaded = svldnt1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -733,7 +733,7 @@ unsafe fn test_svldnt1_gather_u32base_offset_f32_with_svstnt1_scatter_u32base_of
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_offset_s32() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32base_offset_s32(
         svptrue_b32(),
@@ -752,13 +752,13 @@ unsafe fn test_svldnt1_gather_u32base_offset_s32_with_svstnt1_scatter_u32base_of
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_offset_u32() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 4u32.try_into().unwrap());
     svstnt1_scatter_u32base_offset_u32(
         svptrue_b32(),
@@ -777,7 +777,7 @@ unsafe fn test_svldnt1_gather_u32base_offset_u32_with_svstnt1_scatter_u32base_of
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
@@ -807,7 +807,7 @@ unsafe fn test_svldnt1_gather_u64base_offset_f64_with_svstnt1_scatter_u64base_of
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_offset_s64() {
     let mut storage = [0 as i64; 160usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -819,13 +819,13 @@ unsafe fn test_svldnt1_gather_u64base_offset_s64_with_svstnt1_scatter_u64base_of
     let loaded = svldnt1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_offset_u64() {
     let mut storage = [0 as u64; 160usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 8u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b64(), bases, offsets);
@@ -837,13 +837,13 @@ unsafe fn test_svldnt1_gather_u64base_offset_u64_with_svstnt1_scatter_u64base_of
     let loaded = svldnt1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -854,13 +854,13 @@ unsafe fn test_svldnt1sb_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s6
         svldnt1sb_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -871,13 +871,13 @@ unsafe fn test_svldnt1sh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s6
         svldnt1sh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -888,13 +888,13 @@ unsafe fn test_svldnt1sw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s6
         svldnt1sw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -905,13 +905,13 @@ unsafe fn test_svldnt1sb_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u6
         svldnt1sb_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -922,13 +922,13 @@ unsafe fn test_svldnt1sh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u6
         svldnt1sh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -939,13 +939,13 @@ unsafe fn test_svldnt1sw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u6
         svldnt1sw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -956,13 +956,13 @@ unsafe fn test_svldnt1sb_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s3
         svldnt1sb_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -973,13 +973,13 @@ unsafe fn test_svldnt1sh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s3
         svldnt1sh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -990,13 +990,13 @@ unsafe fn test_svldnt1sb_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u3
         svldnt1sb_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1007,13 +1007,13 @@ unsafe fn test_svldnt1sh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u3
         svldnt1sh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1024,13 +1024,13 @@ unsafe fn test_svldnt1sb_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s6
         svldnt1sb_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1041,13 +1041,13 @@ unsafe fn test_svldnt1sh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s6
         svldnt1sh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1058,13 +1058,13 @@ unsafe fn test_svldnt1sw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s6
         svldnt1sw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1075,13 +1075,13 @@ unsafe fn test_svldnt1sb_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u6
         svldnt1sb_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const i8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1092,13 +1092,13 @@ unsafe fn test_svldnt1sh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u6
         svldnt1sh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const i16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1109,13 +1109,13 @@ unsafe fn test_svldnt1sw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u6
         svldnt1sw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const i32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32base_offset_s32(
         svptrue_b8(),
@@ -1134,13 +1134,13 @@ unsafe fn test_svldnt1sb_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_offset_s32(
         svptrue_b16(),
@@ -1159,13 +1159,13 @@ unsafe fn test_svldnt1sh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32base_offset_u32(
         svptrue_b8(),
@@ -1184,13 +1184,13 @@ unsafe fn test_svldnt1sb_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_offset_u32(
         svptrue_b16(),
@@ -1209,13 +1209,13 @@ unsafe fn test_svldnt1sh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -1227,13 +1227,13 @@ unsafe fn test_svldnt1sb_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base
     let loaded = svldnt1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1246,13 +1246,13 @@ unsafe fn test_svldnt1sh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base
         svldnt1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1265,13 +1265,13 @@ unsafe fn test_svldnt1sw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base
         svldnt1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -1283,13 +1283,13 @@ unsafe fn test_svldnt1sb_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base
     let loaded = svldnt1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1302,13 +1302,13 @@ unsafe fn test_svldnt1sh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base
         svldnt1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1321,13 +1321,13 @@ unsafe fn test_svldnt1sw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base
         svldnt1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -1339,13 +1339,13 @@ unsafe fn test_svldnt1sb_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64()
     let loaded = svldnt1sb_gather_u64base_s64(svptrue_b8(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1357,13 +1357,13 @@ unsafe fn test_svldnt1sh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64()
     let loaded = svldnt1sh_gather_u64base_s64(svptrue_b16(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1375,13 +1375,13 @@ unsafe fn test_svldnt1sw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64()
     let loaded = svldnt1sw_gather_u64base_s64(svptrue_b32(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -1393,13 +1393,13 @@ unsafe fn test_svldnt1sb_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64()
     let loaded = svldnt1sb_gather_u64base_u64(svptrue_b8(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1411,13 +1411,13 @@ unsafe fn test_svldnt1sh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64()
     let loaded = svldnt1sh_gather_u64base_u64(svptrue_b16(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1429,13 +1429,13 @@ unsafe fn test_svldnt1sw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64()
     let loaded = svldnt1sw_gather_u64base_u64(svptrue_b32(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1446,13 +1446,13 @@ unsafe fn test_svldnt1sh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64(
         svldnt1sh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1463,13 +1463,13 @@ unsafe fn test_svldnt1sw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64(
         svldnt1sw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1480,13 +1480,13 @@ unsafe fn test_svldnt1sh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64(
         svldnt1sh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1497,13 +1497,13 @@ unsafe fn test_svldnt1sw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64(
         svldnt1sw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1514,13 +1514,13 @@ unsafe fn test_svldnt1sh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64(
         svldnt1sh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1531,13 +1531,13 @@ unsafe fn test_svldnt1sw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64(
         svldnt1sw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1548,13 +1548,13 @@ unsafe fn test_svldnt1sh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64(
         svldnt1sh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const i16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1565,13 +1565,13 @@ unsafe fn test_svldnt1sw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64(
         svldnt1sw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const i32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_index_s32(
         svptrue_b16(),
@@ -1590,13 +1590,13 @@ unsafe fn test_svldnt1sh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_index_u32(
         svptrue_b16(),
@@ -1615,13 +1615,13 @@ unsafe fn test_svldnt1sh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1633,13 +1633,13 @@ unsafe fn test_svldnt1sh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_
     let loaded = svldnt1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1651,13 +1651,13 @@ unsafe fn test_svldnt1sw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_
     let loaded = svldnt1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -1669,13 +1669,13 @@ unsafe fn test_svldnt1sh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_
     let loaded = svldnt1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -1687,13 +1687,13 @@ unsafe fn test_svldnt1sw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_
     let loaded = svldnt1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_s64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1704,13 +1704,13 @@ unsafe fn test_svldnt1ub_gather_s64offset_s64_with_svstnt1b_scatter_s64offset_s6
         svldnt1ub_gather_s64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_s64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1721,13 +1721,13 @@ unsafe fn test_svldnt1uh_gather_s64offset_s64_with_svstnt1h_scatter_s64offset_s6
         svldnt1uh_gather_s64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_s64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1738,13 +1738,13 @@ unsafe fn test_svldnt1uw_gather_s64offset_s64_with_svstnt1w_scatter_s64offset_s6
         svldnt1uw_gather_s64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_s64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1755,13 +1755,13 @@ unsafe fn test_svldnt1ub_gather_s64offset_u64_with_svstnt1b_scatter_s64offset_u6
         svldnt1ub_gather_s64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_s64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1772,13 +1772,13 @@ unsafe fn test_svldnt1uh_gather_s64offset_u64_with_svstnt1h_scatter_s64offset_u6
         svldnt1uh_gather_s64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_s64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_s64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1789,13 +1789,13 @@ unsafe fn test_svldnt1uw_gather_s64offset_u64_with_svstnt1w_scatter_s64offset_u6
         svldnt1uw_gather_s64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32offset_s32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1806,13 +1806,13 @@ unsafe fn test_svldnt1ub_gather_u32offset_s32_with_svstnt1b_scatter_u32offset_s3
         svldnt1ub_gather_u32offset_s32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((0usize) as i32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32offset_s32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1823,13 +1823,13 @@ unsafe fn test_svldnt1uh_gather_u32offset_s32_with_svstnt1h_scatter_u32offset_s3
         svldnt1uh_gather_u32offset_s32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((0usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u32() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32offset_u32(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1840,13 +1840,13 @@ unsafe fn test_svldnt1ub_gather_u32offset_u32_with_svstnt1b_scatter_u32offset_u3
         svldnt1ub_gather_u32offset_u32(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u32() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((0usize) as u32, 1usize.try_into().unwrap());
     let offsets = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32offset_u32(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1857,13 +1857,13 @@ unsafe fn test_svldnt1uh_gather_u32offset_u32_with_svstnt1h_scatter_u32offset_u3
         svldnt1uh_gather_u32offset_u32(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((0usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u64offset_s64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1874,13 +1874,13 @@ unsafe fn test_svldnt1ub_gather_u64offset_s64_with_svstnt1b_scatter_u64offset_s6
         svldnt1ub_gather_u64offset_s64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u64offset_s64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1891,13 +1891,13 @@ unsafe fn test_svldnt1uh_gather_u64offset_s64_with_svstnt1h_scatter_u64offset_s6
         svldnt1uh_gather_u64offset_s64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_u64offset_s64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1908,13 +1908,13 @@ unsafe fn test_svldnt1uw_gather_u64offset_s64_with_svstnt1w_scatter_u64offset_s6
         svldnt1uw_gather_u64offset_s64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u64() {
     let mut storage = [0 as u8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u64offset_u64(svptrue_b8(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1925,13 +1925,13 @@ unsafe fn test_svldnt1ub_gather_u64offset_u64_with_svstnt1b_scatter_u64offset_u6
         svldnt1ub_gather_u64offset_u64(svptrue_b8(), storage.as_ptr() as *const u8, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u64offset_u64(svptrue_b16(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1942,13 +1942,13 @@ unsafe fn test_svldnt1uh_gather_u64offset_u64_with_svstnt1h_scatter_u64offset_u6
         svldnt1uh_gather_u64offset_u64(svptrue_b16(), storage.as_ptr() as *const u16, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     svstnt1w_scatter_u64offset_u64(svptrue_b32(), storage.as_mut_ptr(), offsets, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -1959,13 +1959,13 @@ unsafe fn test_svldnt1uw_gather_u64offset_u64_with_svstnt1w_scatter_u64offset_u6
         svldnt1uw_gather_u64offset_u64(svptrue_b32(), storage.as_ptr() as *const u32, offsets);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base_offset_s32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32base_offset_s32(
         svptrue_b8(),
@@ -1984,13 +1984,13 @@ unsafe fn test_svldnt1ub_gather_u32base_offset_s32_with_svstnt1b_scatter_u32base
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base_offset_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_offset_s32(
         svptrue_b16(),
@@ -2009,13 +2009,13 @@ unsafe fn test_svldnt1uh_gather_u32base_offset_s32_with_svstnt1h_scatter_u32base
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base_offset_u32() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 1u32.try_into().unwrap());
     svstnt1b_scatter_u32base_offset_u32(
         svptrue_b8(),
@@ -2034,13 +2034,13 @@ unsafe fn test_svldnt1ub_gather_u32base_offset_u32_with_svstnt1b_scatter_u32base
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base_offset_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_offset_u32(
         svptrue_b16(),
@@ -2059,13 +2059,13 @@ unsafe fn test_svldnt1uh_gather_u32base_offset_u32_with_svstnt1h_scatter_u32base
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base_offset_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2077,13 +2077,13 @@ unsafe fn test_svldnt1ub_gather_u64base_offset_s64_with_svstnt1b_scatter_u64base
     let loaded = svldnt1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base_offset_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2096,13 +2096,13 @@ unsafe fn test_svldnt1uh_gather_u64base_offset_s64_with_svstnt1h_scatter_u64base
         svldnt1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base_offset_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2115,13 +2115,13 @@ unsafe fn test_svldnt1uw_gather_u64base_offset_s64_with_svstnt1w_scatter_u64base
         svldnt1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base_offset_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2133,13 +2133,13 @@ unsafe fn test_svldnt1ub_gather_u64base_offset_u64_with_svstnt1b_scatter_u64base
     let loaded = svldnt1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base_offset_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2152,13 +2152,13 @@ unsafe fn test_svldnt1uh_gather_u64base_offset_u64_with_svstnt1h_scatter_u64base
         svldnt1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base_offset_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2171,13 +2171,13 @@ unsafe fn test_svldnt1uw_gather_u64base_offset_u64_with_svstnt1w_scatter_u64base
         svldnt1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2189,13 +2189,13 @@ unsafe fn test_svldnt1ub_gather_u64base_s64_with_svstnt1b_scatter_u64base_s64()
     let loaded = svldnt1ub_gather_u64base_s64(svptrue_b8(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2207,13 +2207,13 @@ unsafe fn test_svldnt1uh_gather_u64base_s64_with_svstnt1h_scatter_u64base_s64()
     let loaded = svldnt1uh_gather_u64base_s64(svptrue_b16(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2225,13 +2225,13 @@ unsafe fn test_svldnt1uw_gather_u64base_s64_with_svstnt1w_scatter_u64base_s64()
     let loaded = svldnt1uw_gather_u64base_s64(svptrue_b32(), bases);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64() {
     let mut storage = [0 as i8; 1280usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 1u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b8(), bases, offsets);
@@ -2243,13 +2243,13 @@ unsafe fn test_svldnt1ub_gather_u64base_u64_with_svstnt1b_scatter_u64base_u64()
     let loaded = svldnt1ub_gather_u64base_u64(svptrue_b8(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2261,13 +2261,13 @@ unsafe fn test_svldnt1uh_gather_u64base_u64_with_svstnt1h_scatter_u64base_u64()
     let loaded = svldnt1uh_gather_u64base_u64(svptrue_b16(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2279,13 +2279,13 @@ unsafe fn test_svldnt1uw_gather_u64base_u64_with_svstnt1w_scatter_u64base_u64()
     let loaded = svldnt1uw_gather_u64base_u64(svptrue_b32(), bases);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1h_scatter_s64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2296,13 +2296,13 @@ unsafe fn test_svldnt1uh_gather_s64index_s64_with_svstnt1h_scatter_s64index_s64(
         svldnt1uh_gather_s64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1w_scatter_s64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2313,13 +2313,13 @@ unsafe fn test_svldnt1uw_gather_s64index_s64_with_svstnt1w_scatter_s64index_s64(
         svldnt1uw_gather_s64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1h_scatter_s64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2330,13 +2330,13 @@ unsafe fn test_svldnt1uh_gather_s64index_u64_with_svstnt1h_scatter_s64index_u64(
         svldnt1uh_gather_s64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_s64(0, 1);
     svstnt1w_scatter_s64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2347,13 +2347,13 @@ unsafe fn test_svldnt1uw_gather_s64index_u64_with_svstnt1w_scatter_s64index_u64(
         svldnt1uw_gather_s64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1h_scatter_u64index_s64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2364,13 +2364,13 @@ unsafe fn test_svldnt1uh_gather_u64index_s64_with_svstnt1h_scatter_u64index_s64(
         svldnt1uh_gather_u64index_s64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((0usize) as i64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1w_scatter_u64index_s64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2381,13 +2381,13 @@ unsafe fn test_svldnt1uw_gather_u64index_s64_with_svstnt1w_scatter_u64index_s64(
         svldnt1uw_gather_u64index_s64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((0usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64() {
     let mut storage = [0 as u16; 640usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1h_scatter_u64index_u64(svptrue_b16(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2398,13 +2398,13 @@ unsafe fn test_svldnt1uh_gather_u64index_u64_with_svstnt1h_scatter_u64index_u64(
         svldnt1uh_gather_u64index_u64(svptrue_b16(), storage.as_ptr() as *const u16, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64() {
     let mut storage = [0 as u32; 320usize];
-    let data = svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((0usize) as u64, 1usize.try_into().unwrap());
     let indices = svindex_u64(0, 1);
     svstnt1w_scatter_u64index_u64(svptrue_b32(), storage.as_mut_ptr(), indices, data);
     for (i, &val) in storage.iter().enumerate() {
@@ -2415,13 +2415,13 @@ unsafe fn test_svldnt1uw_gather_u64index_u64_with_svstnt1w_scatter_u64index_u64(
         svldnt1uw_gather_u64index_u64(svptrue_b32(), storage.as_ptr() as *const u32, indices);
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((0usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_index_s32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s32((1usize) as i32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_index_s32(
         svptrue_b16(),
@@ -2440,13 +2440,13 @@ unsafe fn test_svldnt1uh_gather_u32base_index_s32_with_svstnt1h_scatter_u32base_
     );
     assert_vector_matches_i32(
         loaded,
-        svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s32((1usize) as i32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_index_u32() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u32((1usize) as u32, 1usize.try_into().unwrap());
     let bases = svindex_u32(0, 2u32.try_into().unwrap());
     svstnt1h_scatter_u32base_index_u32(
         svptrue_b16(),
@@ -2465,13 +2465,13 @@ unsafe fn test_svldnt1uh_gather_u32base_index_u32_with_svstnt1h_scatter_u32base_
     );
     assert_vector_matches_u32(
         loaded,
-        svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u32((1usize) as u32, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_index_s64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2483,13 +2483,13 @@ unsafe fn test_svldnt1uh_gather_u64base_index_s64_with_svstnt1h_scatter_u64base_
     let loaded = svldnt1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_index_s64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_s64((1usize) as i64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2501,13 +2501,13 @@ unsafe fn test_svldnt1uw_gather_u64base_index_s64_with_svstnt1w_scatter_u64base_
     let loaded = svldnt1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_i64(
         loaded,
-        svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_s64((1usize) as i64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_index_u64() {
     let mut storage = [0 as i16; 640usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 2u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b16(), bases, offsets);
@@ -2519,13 +2519,13 @@ unsafe fn test_svldnt1uh_gather_u64base_index_u64_with_svstnt1h_scatter_u64base_
     let loaded = svldnt1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
 #[simd_test(enable = "sve,sve2")]
 unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_index_u64() {
     let mut storage = [0 as i32; 320usize];
-    let data = svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap());
+    let data = svindex_u64((1usize) as u64, 1usize.try_into().unwrap());
     let bases = svdup_n_u64(storage.as_ptr() as u64);
     let offsets = svindex_u64(0, 4u32.try_into().unwrap());
     let bases = svadd_u64_x(svptrue_b32(), bases, offsets);
@@ -2537,6 +2537,6 @@ unsafe fn test_svldnt1uw_gather_u64base_index_u64_with_svstnt1w_scatter_u64base_
     let loaded = svldnt1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap());
     assert_vector_matches_u64(
         loaded,
-        svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()),
+        svindex_u64((1usize) as u64, 1usize.try_into().unwrap()),
     );
 }
diff --git a/crates/stdarch-gen2/src/load_store_tests.rs b/crates/stdarch-gen2/src/load_store_tests.rs
index d059bcee2d..f05a209b1b 100644
--- a/crates/stdarch-gen2/src/load_store_tests.rs
+++ b/crates/stdarch-gen2/src/load_store_tests.rs
@@ -541,8 +541,17 @@ fn get_expected_range(tuple_idx: usize, props: &LoadIntrinsicProps) -> proc_macr
             quote! { #cvt_fn(#pred_fn(), #svindex_fn((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))}
         } else {
             let ret_acle = props.ret_type.as_ref().unwrap().acle_notation_repr();
+            let ret_ident = format_ident!(
+                "{}",
+                props
+                    .ret_type
+                    .as_ref()
+                    .and_then(TypeKind::base_type)
+                    .unwrap()
+                    .rust_repr()
+            );
             let svindex = format_ident!("svindex_{ret_acle}");
-            quote!(#svindex((#vnum_adjust #start).try_into().unwrap(), #tuple_len.try_into().unwrap()))
+            quote!(#svindex((#vnum_adjust #start) as #ret_ident, #tuple_len.try_into().unwrap()))
         }
     }
 }